diff --git "a/663.jsonl" "b/663.jsonl" new file mode 100644--- /dev/null +++ "b/663.jsonl" @@ -0,0 +1,639 @@ +{"seq_id":"307852527","text":"import numpy as np\nfrom extract_data import initialize_file, extract_pixel\nfrom sklearn.preprocessing import normalize\ndef create_SAD_mat(data, winsize=3):\n print(data.shape)\n mask = np.zeros(shape=(data.shape[0]-winsize+1,data.shape[1]-winsize+1))\n for x in range(data.shape[0]-winsize+1):\n for y in range(data.shape[1]-winsize+1):\n center = extract_pixel(x,y, data).transpose()\n norm = np.linalg.norm(center)\n for z in range(winsize):\n for n in range(winsize):\n other = extract_pixel(z+x,n+y,data).transpose()\n norm_other = np.linalg.norm(other)\n spec_angle = np.arccos((center*other)/(norm*norm_other))\n mask[x][y] = np.mean(spec_angle)\n #if x%10 == 0:\n # print('X ',x,'Y ',y)\n mask = normalize(mask) \n return mask \ndef threshold_mask(mask):\n mask_high = np.zeros(shape=(mask.shape[0],mask.shape[1]),dtype=np.int64)\n mask_low = np.zeros(shape=(mask.shape[0],mask.shape[1]), dtype=np.int64)\n for x in range(mask.shape[0]):\n for y in range(mask.shape[1]):\n if(mask[x,y] < np.mean(mask)):\n mask_high[x,y] = 0\n else:\n mask_high[x,y] = 1\n mask_low = np.invert(mask_high)\n print(mask.shape)\n return mask_high, mask_low\ndef add_padding(data, winsize):\n data_pad = np.empty((data.shape[0]+winsize-1, data.shape[1]+winsize-1, 0))\n for x in range(data.shape[2]):\n data_temp = np.pad(data[:,:,x], 1, 'mean')\n data_temp = np.expand_dims(data_temp, axis=0)\n data_temp = np.reshape(data_temp, (data_temp.shape[1], data_temp.shape[2], data_temp.shape[0]))\n data_pad = np.append(data_pad, data_temp, axis=2)\n return data_pad\ndef apply_mask(data, low_path, high_path):\n #Create a empty data sets to hold high and low structure data\n data_high = np.zeros(shape=(data.shape[0],data.shape[1],0))\n data_low = np.zeros(shape=(data.shape[0],data.shape[1],0))\n #Load the masks from file\n high_mask = np.load(high_path)\n low_mask = np.load(low_path)\n #apply the mask to the provided data\n for x in range(data.shape[2]):\n data_high = np.append(data_high, np.expand_dims(np.multiply(data[:,:,x], high_mask), axis=2), axis=2)\n data_low = np.append(data_low, np.expand_dims(np.multiply(data[:,:,x], low_mask), axis=2), axis=2)\n #reshape the data to match to a set of samples\n data_high = np.reshape(data_high, (data.shape[0]*data.shape[1], data.shape[2]), order='C')\n data_low = np.reshape(data_low, (data.shape[0]*data.shape[1], data.shape[2]), order='C')\n #find the the columns of the data that contains only zeros and remove them\n bad_cols_high = np.where(data_high.sum(axis=1) == 0)[0]\n data_high = np.delete(data_high, bad_cols_high, axis=0)\n bad_cols_low = np.where(data_low.sum(axis=1) == 0)[0]\n data_low = np.delete(data_low, bad_cols_low, axis=0)\n return data_high, data_low\n","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"32090268","text":"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\nimport urwid\nimport json\nimport subprocess\n\nclass TaskWarrior(object):\n\n def pending_tasks(self):\n raw_output = subprocess.check_output(['task', 'export',\n 'status:pending'])\n\n task_json = '[%s]' % raw_output\n return json.loads(task_json, strict=False)\n\n def complete(self, task):\n subprocess.call(['task', task['uuid'], 'done'])\n\n def delete(self, task):\n subprocess.call(['task', task['uuid'], 
'rc.confirmation:no', 'del'])\n\n def add(self, value):\n subprocess.call(['task', 'add', value])\n\n def mod(self, task, value):\n subprocess.call(['task', task['uuid'], 'mod', value])\n\n def undo(self):\n subprocess.call(['task', 'rc.confirmation:no', 'undo'])\n\n\nclass TaskWidget (urwid.WidgetWrap):\n\n def __init__ (self, task):\n self.task = task\n\n desc = urwid.Text(task.get('description', ''))\n proj = urwid.Text(task.get('project','') + ' ', align='right')\n\n due = urwid.Text(task.get('due',''), align='right')\n\n item = urwid.AttrMap(urwid.Columns([\n ('fixed', 11, urwid.AttrWrap(proj, 'proj', 'proj_focus')),\n desc,\n due\n ]), 'body', 'body_focus')\n\n urwid.WidgetWrap.__init__(self, item)\n\n def selectable (self):\n return True\n\n def keypress(self, size, key):\n return key\n\n\nclass LineEditor(urwid.Edit):\n\n __metaclass__ = urwid.signals.MetaSignals\n signals = ['done']\n\n def keypress(self, size, key):\n if key == 'enter':\n urwid.emit_signal(self, 'done', self.get_edit_text())\n return\n\n if key == 'esc':\n urwid.emit_signal(self, 'done', None)\n return\n\n return urwid.Edit.keypress(self, size, key)\n\n\nclass ScrollingListBox(urwid.ListBox):\n\n def scroll_down(self):\n self.set_focus(self.get_focus()[1] + 1)\n\n def scroll_up(self):\n focus = self.get_focus()[1]\n if focus > 0:\n self.set_focus(focus - 1)\n\n def mouse_event(self, size, event, button, col, row, focus):\n button_map = {\n 4: self.scroll_down,\n 5: self.scroll_up\n }\n\n if button in button_map:\n button_map[button]()\n\n return urwid.ListBox.mouse_event(self, size, event, button, col, row, focus)\n\n def keypress(self, size, key):\n key_map = {\n 'j': self.scroll_down,\n 'k': self.scroll_up\n }\n\n if key in key_map:\n key_map[key]()\n\n return urwid.ListBox.keypress(self, size, key)\n\n\n\nclass Tasky(object):\n\n palette = [\n ('proj', '', '', '', 'dark green', ''),\n ('proj_focus', '', '', '', 'dark gray', 'dark green'),\n ('body','', '', '', 'dark blue', ''),\n ('body_focus', '', '', '', 'dark gray', 'dark cyan'),\n ('head','light red', 'black'),\n ]\n\n def __init__(self):\n\n self.warrior = TaskWarrior()\n\n header = urwid.AttrMap(urwid.Text('tasky.α'), 'head')\n self.walker = urwid.SimpleListWalker([])\n self.list_box = ScrollingListBox(self.walker)\n self.view = urwid.Frame(urwid.AttrWrap(self.list_box, 'body'), header=header)\n self.refresh()\n\n loop = urwid.MainLoop(self.view, Tasky.palette, unhandled_input=self.keystroke)\n loop.screen.set_terminal_properties(colors=256)\n loop.run()\n\n def refresh(self):\n self.walker[:] = map(TaskWidget, self.warrior.pending_tasks())\n\n def keystroke(self, input):\n def exit():\n raise urwid.ExitMainLoop()\n\n def undo():\n self.warrior.undo()\n self.refresh()\n\n view_action_map = {\n 'q': exit,\n 'Q': exit,\n 'r': self.refresh,\n 'u': undo,\n 'i': self.new_task,\n ':': self.command_mode,\n '!': self.shell_mode\n }\n\n task_action_map = {\n 'enter': (self.edit_task, False),\n 'e': (self.edit_task, False),\n 'n': (self.task_note, False),\n 'c': (self.warrior.complete, True),\n 'd': (self.warrior.delete, True),\n }\n\n if input in view_action_map:\n view_action_map[input]()\n\n if input in task_action_map:\n (action, should_refresh) = task_action_map[input]\n action(self.selected_task())\n if should_refresh:\n self.refresh()\n\n def selected_task(self):\n return self.list_box.get_focus()[0].task\n\n\n def task_note(self, task):\n subprocess.call(\"tmux split-window 'tasknote %i'\" % task['id'], shell=True)\n\n def present_editor(self, prompt, 
text, handler):\n        self.foot = LineEditor(prompt, text)\n        self.view.set_footer(self.foot)\n        self.view.set_focus('footer')\n        urwid.connect_signal(self.foot, 'done', handler)\n\n    def command_mode(self):\n        self.present_editor(': ', '', self.command_done)\n\n    def shell_mode(self):\n        self.present_editor('! ', '', self.shell_done)\n\n    def edit_task(self, task):\n        self.edited_task = task\n        self.present_editor(' >> ', task['description'], self.edit_done)\n\n    def new_task(self):\n        self.present_editor(' >> ', '', self.new_done)\n\n    def dismiss_editor(action):\n        def wrapped(self, content):\n            self.view.set_focus('body')\n            urwid.disconnect_signal(self, self.foot, 'done', action)\n            if content:\n                action(self, content)\n            self.view.set_footer(None)\n\n        return wrapped\n\n    @dismiss_editor\n    def edit_done(self, content):\n        self.warrior.mod(self.edited_task, content)\n        self.edited_task = None\n        self.refresh()\n\n    @dismiss_editor\n    def new_done(self, content):\n        self.warrior.add(content)\n        self.refresh()\n\n    @dismiss_editor\n    def command_done(self, content):\n        subprocess.call(\"task %s\" % content, shell=True)\n        self.refresh()\n\n    @dismiss_editor\n    def shell_done(self, content):\n        subprocess.call(\"tmux split-window '%s'\" % content, shell=True)\n\n\n\nif __name__ == '__main__':\n    Tasky()\n","sub_path":"all-gists/3390779/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"313902578","text":"import discord\r\nimport asyncio\r\nimport os\r\n\r\nclient=discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n    print('Logged in as')\r\n    print(client.user.name)\r\n    print(client.user.id)\r\n    print('------')\r\n    \r\n@client.event\r\nasync def on_message(message):\r\n    #await message.channel.send(\"test\")\r\n    if message.author == client.user:\r\n        return\r\n    if (message.content.split()[0]==\"!역할지급\")&(str(message.channel)==\"역할_요청\"):\r\n        if message.content==\"!역할지급\":\r\n            await message.channel.send(\"!역할지급 [일반인|연구원]\")\r\n        elif message.content.split()[1]==\"일반인\":\r\n            role = discord.utils.get(message.guild.roles, name=\"연구원\")\r\n            await message.author.remove_roles(role)\r\n            role = discord.utils.get(message.guild.roles, name=\"승인됨\")\r\n            await message.author.add_roles(role)\r\n            role = discord.utils.get(message.guild.roles, name=\"일반인\")\r\n            await message.author.add_roles(role)\r\n            await message.channel.send(\"일반인 역할이 지급되었습니다.\")\r\n        elif message.content.split()[1]==\"연구원\":\r\n            role = discord.utils.get(message.guild.roles, name=\"일반인\")\r\n            await message.author.remove_roles(role)\r\n            role = discord.utils.get(message.guild.roles, name=\"승인됨\")\r\n            await message.author.add_roles(role)\r\n            role = discord.utils.get(message.guild.roles, name=\"연구원\")\r\n            await message.author.add_roles(role)\r\n            await message.channel.send(\"연구원 역할이 지급되었습니다.\")\r\n        elif message.content.split()[1]==\"관리자\":\r\n            role = discord.utils.get(message.guild.roles, name=\"광리자\")\r\n            await message.author.add_roles(role)\r\n            await message.channel.send(\"_광_리자 역할이 지급되었습니다.\")\r\n        elif message.content.split()[1]==\"광리자\":\r\n            role = discord.utils.get(message.guild.roles, name=\"광리자\")\r\n            await message.author.add_roles(role)\r\n            await message.channel.send(\"광리자 역할이 지급되었습니다.\")\r\n        elif message.content.split()[1]==\"모나리자\":\r\n            role = discord.utils.get(message.guild.roles, name=\"모나리자\")\r\n            await message.author.add_roles(role)\r\n            await message.channel.send(\"모나리자 역할이 지급되었습니다.\")\r\n        elif message.content.split()[1]==\"0도미드스핀의발견자\":\r\n            await message.channel.send(\"핫덕님 겁니다.\")\r\n        else:\r\n            await message.channel.send(message.content.split()[0]+\" 역할을 찾을 수 없습니다. (일반인/연구원)\")\r\n    elif (message.content.split()[0]==\"!광리자\")&(str(message.channel)==\"역할_요청\"):\r\n        role = discord.utils.get(message.guild.roles, name=\"광리자\")\r\n        await message.author.remove_roles(role)\r\n\r\n#@client.event\r\n#async def on_member_join(self):\r\n#    ment = self.mention\r\n#    await self.get_channel(697660178190958664).send(\"{has joined the server.\")\r\naccess_token=os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"390855808","text":"from django.contrib import admin\nfrom .models import *\n#define advanced admin options\nclass AuthorAdmin(admin.ModelAdmin):\n\n    #fields displayed on the list page\n    list_display = ('name','age','email')\n    # fields on the list page that also link to the detail page\n    list_display_links =('email','name',)\n    #fields that can be edited directly on the list page\n    list_editable =('age',)\n    # add the fields that can be searched\n    search_fields = ('name','email',)\n    # add the fields that can be filtered on\n    list_filter = ('name',)\n    #fields = (\"name\",)\n    fieldsets = (\n        (\"基本信息\",{\n            'fields':(\"name\",\"age\"),\n            \"classes\":(\"collapse\",),\n\n        }),\n        (\"附加信息\", {\n            'fields': (\"email\", \"isActive\",'publishers'),\n            \"classes\": (\"collapse\",),\n\n        })\n    )\n\nclass BookAdmin(admin.ModelAdmin):\n    date_hierarchy = (\"publicate_date\")\n\nclass PublisherAdmin(admin.ModelAdmin):\n    # fields displayed on the list page\n    list_display = ('name', 'address', 'city',)\n    #address and city are editable in the list\n    list_editable = ('city','address',)\n    #add a filter panel on the right, allowing filtering by city\n    list_filter = ('city',)\n    # add the fields that can be searched\n    search_fields = ('name', 'address',)\n    #group the fields on the detail page\n    fieldsets = (\n        (\n            \"基本选项\",{\n                'fields':(\"name\",\"address\",\"city\"),\n            }\n        ),\n        (\n            \"高级选项\",{\n                'fields':(\"country\",\"website\"),\n                'classes':(\"collapse\",),\n            }\n        )\n    )\n\n# Register your models here.\nadmin.site.register(Book,BookAdmin)\nadmin.site.register(Author,AuthorAdmin)\nadmin.site.register(Publisher,PublisherAdmin)\nadmin.site.register(Wife)\nadmin.site.register(Person)\n","sub_path":"aid1807正式班老师课件/Django/Day05-1(DjangoDemon03最终版)/DjangoDemo03/index/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"506283835","text":"from sentence_transformers import SentenceTransformer\nimport pandas as pd\nimport hickle as hkl\n\n\n\nCSV_FILE = \"who_covid_19_qa.csv\"\nmodel = SentenceTransformer('bert-base-nli-mean-tokens')\n\ndata = pd.read_csv(CSV_FILE)\n\nprint(data[\"question\"])\n\nsentences = data[\"question\"].to_list()\nsentence_embeddings = model.encode(sentences)\n\nprint(len(sentence_embeddings))\nprint(len(sentence_embeddings[0]))\n\nhkl.dump(sentence_embeddings, 'who_covid_19_question_embedding.hkl', mode='w', compression='gzip')","sub_path":"gen_embedding.py","file_name":"gen_embedding.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"486185044","text":" #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 15:54:58 2018\n\n@author: christian michelsen \n\"\"\"\n\n# to ignore deprecation warnings:\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size 
changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nwarnings.filterwarnings(module='sklearn*', action='ignore', category=DeprecationWarning)\n\n#imports \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport uproot\nimport pandas as pd\nimport scipy as sp\nimport seaborn as sns\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\nfrom copy import copy\nimport pickle\nimport shap\n\n\nfrom CM_extra_funcs import ( \n do_load_filename, \n MyTimer,\n is_this_hep,\n color_dict,\n print_headline,\n\n load_data_file,\n load_model,\n save_model,\n \n # train_test_split_non_random,\n train_test_index,\n \n check_event_numbers_are_matching,\n check_qmatch_are_correct,\n \n CV_EarlyStoppingTrigger,\n plot_cv_res,\n plot_cv_test_results,\n plot_random_search,\n \n degrees_of_freedom,\n \n get_dict_split,\n get_dict_flavor,\n # SeriesWrapper,\n # DataFrameWrapper,\n PandasContainer,\n )\n\n\n#%% ===========================================================================\n# Initial parameters\n# =============================================================================\n\ndown_sample = 0.01\nverbose = True\ncpu_n_jobs = 20\nonly_vertex_vars = True\n\n\nforce_rerun = False\ncreate_plots = False\nsave_plots = False\ncreate_pairgrid_plot = False\ncreate_shap_plot = True\nclose_figure_after_saving = False\n\n\n\nnum_boost_round = 10000\nearly_stopping_rounds = 1000\ntest_set_frac = 0.1\n\nn_fold = 5\nn_sigma = 1\nn_iter_RS = 100\n\nis_hep = is_this_hep(local_cores=8)\nif not is_hep:\n cpu_n_jobs = 7\nprint_headline(f\"Running CM_analysis_ML with {cpu_n_jobs} cores\")\n\n\nbranches_to_import = None \nfilename = './data/outputfile_Ref12.root'\ntreename_data = 'ntuple_DataBtag'\ntreename_MC = 'ntuple_McBtag'\nprogress_folder = './CM_saved_progress/'\n\n\n# use these columns as variables\nvertex_vars = ['costheta', 'projet', 'bqvjet','ptljet']\nshape_vars = ['sphjet','pt2jet','muljet']\n\nif only_vertex_vars:\n MC_cols = vertex_vars\n str_vars = 'vertex_vars'\n print(\"\\nusing only vertex variables:\", MC_cols)\nelse: \n MC_cols = vertex_vars + shape_vars\n str_vars = 'all_vars'\n print(\"\\nusing all variables (vertex and shape):\", MC_cols)\n# MC_cols = ['bqvjet', 'muljet', 'projet', 'pt2jet', 'ptljet', 'sphjet', 'phijet']\n\n\nplt.rcParams.update({'font.size': 20})\nplt.rcParams.update({'lines.linewidth': 2})\nplt.rcParams.update({'figure.figsize': (14,8)})\n\nsns.set()\n\n\n\n#%% ===========================================================================\n# Load data and split into 2- and 3-jet events \n# =============================================================================\n\n\n# load real data\ndf_data_all = load_data_file(filename, treename_data, branches_to_import, \n verbose, save_type='hdf5')\n# load MC\ndf_MC_all = load_data_file(filename, treename_MC, branches_to_import, \n verbose, save_type='hdf5')\n\n#keep all original data\ndf_data = df_data_all.copy()\n# TODO notice here that we do not split on 'datatype'\n# df_MC = df_MC_all[df_MC_all.datatype == 1].copy() # normal MC \n# df_MCb = df_MC_all[df_MC_all.datatype == 2].copy() # MC b, i.e. 
extra b-events\ndf_MC = df_MC_all.copy() # normal MC \n\n\n# downsample data if needed to 'down_sample'\nif down_sample < 1:\n print(f'\\nDownsampling to {down_sample*100:.2f}%')\n df_MC = df_MC.iloc[:int((len(df_MC)*down_sample)//2)*2]\n\n# # get 2-jet events and 3-jet\n# df_MC_2j = df_MC[df_MC.njet == 2]\ndf_MC_3j = df_MC[df_MC.njet == 3]\n\n\n# check that the two jets share same event number:\n# check_event_numbers_are_matching(df_MC_2j)\ncheck_event_numbers_are_matching(df_MC_3j)\n\n# TODO qmatch, check_qmatch_are_correct\n\n\n#%% ===========================================================================\n# Original DF to X, y and dicts using DataFrameContainer dfc \n# =============================================================================\n\n# # use the MC columns as variables:\n# X_MC_2j = df_MC_2j.loc[:, MC_cols]\nX_MC_3j = df_MC_3j.loc[:, MC_cols]\n\n# # and use flevt as y-value\n# flevt_MC_2j = df_MC_2j.loc[:, 'flevt']\nflevt_MC_3j = df_MC_3j.loc[:, 'flevt']\n\n# # set all values where flevt is 5 to 1 (ie. bb), 4 (ie. cc) to 2 and else 0\n# y_MC_2j_3f = flevt_MC_2j.where(4 <= flevt_MC_2j, 0).replace(4, 2).replace(5, 1)\ny_MC_3j_3f = flevt_MC_3j.where(4 <= flevt_MC_3j, 0).replace(4, 2).replace(5, 1)\n\n# # take 3-flavor events and make into 2-flavor by replacing 2's with 0's\n# y_MC_2j_2f = y_MC_2j_3f.replace(2, 0)\ny_MC_3j_2f = y_MC_3j_3f.replace(2, 0)\n\n# # get training and test indices for MC\n# index_MC_2j_train, index_MC_2j_test = train_test_index(X_MC_2j, test_size=0.20)\nindex_MC_3j_train, index_MC_3j_test = train_test_index(X_MC_3j, \n test_size=test_set_frac,\n random_split=True)\n\n# # get dictionaries of indices for flavours (b, c, l, cl, all) \n# # and splits (train, test, all)\n# dict_flavor_MC_2j = get_dict_flavor(y_MC_2j_3f)\n# dict_split_MC_2j = get_dict_split(y_MC_2j_3f.loc[index_MC_2j_train], \n# y_MC_2j_3f.loc[index_MC_2j_test])\ndict_flavor_MC_3j = get_dict_flavor(y_MC_3j_3f)\ndict_split_MC_3j = get_dict_split(y_MC_3j_3f.loc[index_MC_3j_train], \n y_MC_3j_3f.loc[index_MC_3j_test])\n\n\n# # make dataframecontainer using own class PandasContainer. 
\n# # Takes dataframe/series as input and the dictionaries of flavors and splits\n# dfc_X_MC_2j = PandasContainer(X_MC_2j, dict_flavor_MC_2j, dict_split_MC_2j, \n# max_rows=5, max_cols=7)\n# dfc_y_MC_2j_3f = PandasContainer(y_MC_2j_3f, dict_flavor_MC_2j, dict_split_MC_2j) \n# dfc_y_MC_2j_2f = PandasContainer(y_MC_2j_2f, dict_flavor_MC_2j, dict_split_MC_2j) \n\ndfc_X_MC_3j = PandasContainer(X_MC_3j, dict_flavor_MC_3j, dict_split_MC_3j)\ndfc_y_MC_3j_3f = PandasContainer(y_MC_3j_3f, dict_flavor_MC_3j, dict_split_MC_3j) \ndfc_y_MC_3j_2f = PandasContainer(y_MC_3j_2f, dict_flavor_MC_3j, dict_split_MC_3j) \n\n\n# # get nnbjet for 2- and 3-jet events\n# nnbjet_2j = df_MC_2j.loc[:, 'nnbjet']\n# dfc_nnbjet_2j = PandasContainer(nnbjet_2j, dict_flavor_MC_2j, dict_split_MC_2j) \nnnbjet_3j = df_MC_3j.loc[:, 'nnbjet']\ndfc_nnbjet_3j = PandasContainer(nnbjet_3j, dict_flavor_MC_3j, dict_split_MC_3j) \n\n\n#%% ===========================================================================\n# Create initial overview plots \n# =============================================================================\n\n\nprint_headline(\"Original B-tag\")\nprint(\"ROC for org b-tag, training set \\t\", roc_auc_score(dfc_y_MC_3j_2f['train'], \n dfc_nnbjet_3j['train']))\nprint(\"ROC for org b-tag, test set \\t\\t\", roc_auc_score(dfc_y_MC_3j_2f['test'], \n dfc_nnbjet_3j['test']))\n\n# histogram of nnbjet value for b,c,l,cl flavors \nif create_plots:\n fig_nnbjet, ax_nnbjet = plt.subplots(figsize=(10, 10))\n \n for flavor, flavor_df, color in dfc_nnbjet_3j.iterflavors(include_cl=True,\n include_color=True):\n ax_nnbjet.hist(flavor_df, 100, range=(0, 1), \n histtype='step', \n color=color_dict[color],\n label=flavor)\n \n ax_nnbjet.set(xlabel='b-tag', ylabel='Counts', title='Histogram of b-tags')\n ax_nnbjet.legend(loc='upper left')\n \n if save_plots:\n fig_nnbjet.savefig('./CM_figures/nnbjet_histogram.pdf', dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n \n\n\n# pair grid plots of the input variables and b-tags. 
Quite slow to run.\nif create_pairgrid_plot and create_plots:\n # pair grid plots of the input variables: kde-plots, scatter_plots and kde-histograms\n g_inputvars = sns.PairGrid(dfc_X_MC_3j['train'].sample(10_000, random_state=42), \n diag_sharey=False)\n g_inputvars.map_lower(sns.kdeplot, cmap='Blues_d', n_levels=6) \n g_inputvars.map_upper(plt.scatter, s=0.2, alpha=0.2)\n g_inputvars.map_diag(sns.kdeplot, lw=2)\n if save_plots:\n g_inputvars.savefig('./CM_figures/pairgrid_input_vars.pdf', dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n \n # plot of b-tags as 2d plot with marginal distributions on the axis\n g_btags = sns.JointGrid(x=nnbjet_3j[::2], y=nnbjet_3j[1::2], dropna=False,\n xlim=(0, 1), ylim=(0, 1), space=0, size=10)\n g_btags = g_btags.plot_joint(plt.scatter, s=1)\n # g_btags = g_btags.plot_joint(sns.kdeplot, cmap=\"Blues_d\", n_levels=3)\n g_btags = g_btags.plot_marginals(sns.distplot, bins=100, kde=False)\n g_btags.set_axis_labels(\"b-tag\", \"b-tag\")\n if save_plots:\n g_btags.savefig('./CM_figures/pairgrid_btags.pdf', dpi=300)\n g_btags.savefig('./CM_figures/pairgrid_btags.png', dpi=100)\n if close_figure_after_saving:\n plt.close('all')\n\n\n\n# overview of input variables as histograms\nif create_plots:\n\n fig_overview, ax_overview = plt.subplots(nrows=1, ncols=7, figsize=(16, 10))\n ax_overview = ax_overview.flatten()\n for flavor, df_flavor in dfc_X_MC_3j.iterflavors():\n for i, (name, col) in enumerate(df_flavor.iteritems()):\n ax_overview[i].hist(col, 50, label=flavor, histtype='step', log=False)\n ax_overview[i].set(xlabel=name)\n # ax_overview[-1].set_visible(False)\n ax_overview[0].legend(loc='upper right')\n \n fig_overview.tight_layout()\n if save_plots:\n fig_overview.savefig('./CM_figures/variables_overview.pdf', dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n\n\n#%% ===========================================================================\n# Initiate XGBoost and LightGBM \n# =============================================================================\n\nverbose_eval = early_stopping_rounds // 2 if verbose else False\nmetrics_xgb = ['auc', 'error', 'logloss']\nmetrics_lgb = ['auc', 'binary_error', 'binary_logloss']\n\n\nfrom xgboost import XGBClassifier\nimport xgboost\n\nfrom lightgbm import LGBMClassifier\nimport lightgbm\n\n\n\nclf_org_xgb = XGBClassifier( n_estimators = 1000,\n learning_rate = 0.1,\n objective = 'binary:logistic',\n eval_metric = 'auc',\n # base_score = proportion_2j,\n n_jobs = cpu_n_jobs,\n random_state = 42,\n silent = True\n )\n\nclf_org_lgb = LGBMClassifier( n_estimators = 1000,\n learning_rate = 0.1,\n objective = 'binary',\n n_jobs = cpu_n_jobs,\n random_state = 42,\n silent = True,\n verbose = -1,\n )\n\nxgb_params = clf_org_xgb.get_xgb_params()\n\nlgb_params = clf_org_lgb.get_params()\nlgb_params.pop('n_estimators')\nlgb_params['num_threads'] = lgb_params['n_jobs']\nlgb_params.pop('n_jobs')\nlgb_params['seed'] = lgb_params['random_state']\nlgb_params.pop('random_state')\nlgb_params.pop('silent')\n\n\nxgb_cv_early_stopping = CV_EarlyStoppingTrigger(\n stopping_rounds = early_stopping_rounds, \n maximize_score = True,\n method = 'xgb') \n\nlgb_cv_early_stopping = CV_EarlyStoppingTrigger(\n stopping_rounds = early_stopping_rounds, \n maximize_score = True,\n method = 'lgb') \n\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import roc_auc_score\n# from sklearn.model_selection import StratifiedKFold\nimport scipy.stats as sp_stats\n# from scipy.stats import norm as 
sp_normal\n\n\n# A parameter grid for XGBoost\nparams_random_search_xgb = {\n 'min_child_weight': sp_stats.randint(1, 10),\n 'gamma': sp_stats.expon(0, 1),\n 'subsample': sp_stats.uniform(0.5, 1-0.5),\n 'colsample_bytree': sp_stats.uniform(0.5, 1-0.5),\n 'max_depth': sp_stats.randint(1, 10)\n }\n\nparams_random_search_lgb = {\n 'min_child_weight': sp_stats.randint(1, 10),\n 'subsample': sp_stats.uniform(0.5, 1-0.5),\n 'colsample_bytree': sp_stats.uniform(0.5, 1-0.5),\n 'max_depth': sp_stats.randint(1, 20),\n 'num_leaves': sp_stats.randint(2, 40),\n }\n\ncols_to_use = ['rank_test_score', 'params', 'mean_test_score','std_test_score', \n 'mean_train_score', 'std_train_score']\n\n\n\n#%% ===========================================================================\n# XGboost, 3-jet 2-flavour events - k-fold Cross Validation; # of trees\n# =============================================================================\n\nprint_headline(\"XGBoost\")\n\n# define training matrix and verbosity level \nxgb_trainDmatrix_3j_2f = xgboost.DMatrix(dfc_X_MC_3j['train'], \n label=dfc_y_MC_3j_2f['train'])\n\n\nfilename_cvres_xgb_3j_2f = f'{progress_folder}cvres_xgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\n\nif do_load_filename(filename_cvres_xgb_3j_2f, force_rerun): \n\n print('Loading XGB CV resulsts\\n')\n \n #load saved model\n cvres_xgb_3j_2f = load_model(filename_cvres_xgb_3j_2f)\n\nelse:\n \n print(\"Running cross validation with early stopping on XGB\")\n \n with MyTimer(\"k-fold CV, XGB\", cpu_n_jobs) as t:\n \n # run k-fold CV with XGB\n cvres_xgb_3j_2f = xgboost.cv( xgb_params, \n xgb_trainDmatrix_3j_2f, \n num_boost_round = num_boost_round, \n early_stopping_rounds = early_stopping_rounds,\n \n nfold = n_fold, \n metrics = metrics_xgb,\n stratified = True, \n seed = 42, \n shuffle = True,\n verbose_eval = verbose_eval,\n callbacks = [xgb_cv_early_stopping],\n )\n \n \n #save model\n save_model(cvres_xgb_3j_2f, filename_cvres_xgb_3j_2f)\n\n\n# get best result:\nN_est_cv_best_xgb_3j_2f = cvres_xgb_3j_2f['test-auc-mean'].values.argmax()\nAUC_best_mean_xgb_3j_2f, AUC_best_std_xgb_3j_2f = cvres_xgb_3j_2f.loc[\n cvres_xgb_3j_2f.index[N_est_cv_best_xgb_3j_2f], \n ['test-auc-mean', 'test-auc-std']]\n\nprint(f\"Best number of estimators based on cross validation: {N_est_cv_best_xgb_3j_2f}\")\nstr_xgb_3j_2f = (f\"ROC for XGB-tag, {n_fold}-fold CV set \\t\\t \" + \n f\"{AUC_best_mean_xgb_3j_2f:.5f} +/- {AUC_best_std_xgb_3j_2f:.5f}\")\nprint(str_xgb_3j_2f)\n\n# fit normal xgb classfier using the best number of estimators from CV\nclf_xgb_3j_2f = copy(clf_org_xgb)\nclf_xgb_3j_2f.set_params(n_estimators=N_est_cv_best_xgb_3j_2f)\n\n\n\n#%% XGBoost Random Search\n\nfilename_rs_xgb_3j_2f = f'{progress_folder}rs_xgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\nfilename_rs_xgb_3j_2f_res = f'{progress_folder}rs_xgb_3j_2f_res_{str_vars}_{int(down_sample*100)}_percent'\n\n\nif do_load_filename(filename_rs_xgb_3j_2f, force_rerun): \n\n #load saved model\n rs_xgb_3j_2f = load_model(filename_rs_xgb_3j_2f)\n rs_xgb_3j_2f_res = load_model(filename_rs_xgb_3j_2f_res)\n \n\nelse:\n \n print(\"\\nRunning Random Search on XGB\\n\")\n \n # run randomized grid search\n rs_xgb_3j_2f = RandomizedSearchCV(clf_xgb_3j_2f, \n param_distributions = params_random_search_xgb, \n n_iter = n_iter_RS, \n scoring = 'roc_auc', \n n_jobs = 1, \n cv = n_fold,\n verbose = 2, \n random_state = 42,\n return_train_score = True,\n refit = True)\n \n with MyTimer(\"Random Search, XGB\", cpu_n_jobs) as t: \n 
rs_xgb_3j_2f.fit(dfc_X_MC_3j['train'], dfc_y_MC_3j_2f['train'])\n \n \n rs_xgb_3j_2f_res = pd.DataFrame(rs_xgb_3j_2f.cv_results_)\n rs_xgb_3j_2f_res = rs_xgb_3j_2f_res.loc[:, cols_to_use].sort_values(by='rank_test_score')\n\n #save model\n save_model(rs_xgb_3j_2f, filename_rs_xgb_3j_2f)\n save_model(rs_xgb_3j_2f_res, filename_rs_xgb_3j_2f_res)\n print(\"\\n\")\n \n\nclf_xgb_3j_2f = rs_xgb_3j_2f.best_estimator_\n\n\nprint(rs_xgb_3j_2f.best_params_)\nprint(\"\\n\")\nprint(rs_xgb_3j_2f_res.loc[:, ['mean_test_score', 'std_test_score']].iloc[0, :])\nprint(\"\\n\")\n\n\n# XGB: Results of Random Search\nif not is_hep:\n plot_random_search(rs_xgb_3j_2f_res, n_fold, f'XGB 3j 2f, {int(down_sample*100)}% data', \n ylim=(0.79, 0.81))\n\n\n\n#%% XGB: Fitting final model to data\n\nfilename_clf_xgb_3j_2f = f'{progress_folder}clf_xgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\n\nif do_load_filename(filename_clf_xgb_3j_2f, force_rerun): \n #load saved model\n clf_xgb_3j_2f = load_model(filename_clf_xgb_3j_2f)\n\n\nelse:\n \n print(\"\\nFitting XGB\\n\")\n # fit to data\n \n with MyTimer(\"Fitting training data, XGB\", cpu_n_jobs) as t: \n clf_xgb_3j_2f.fit(dfc_X_MC_3j['train'], \n dfc_y_MC_3j_2f['train'], \n eval_metric = metrics_xgb, \n verbose = False,\n eval_set = [((dfc_X_MC_3j['train'], dfc_y_MC_3j_2f['train'])), \n (dfc_X_MC_3j['test'], dfc_y_MC_3j_2f['test'])])\n \n #save model\n save_model(clf_xgb_3j_2f, filename_clf_xgb_3j_2f)\n \n \n# predict b-tags and scores\ny_pred_xgb_3j_2f_test = clf_xgb_3j_2f.predict(dfc_X_MC_3j['test'])\ny_scores_xgb_3j_2f_test = clf_xgb_3j_2f.predict_proba(dfc_X_MC_3j['test'])[:, 1]\ny_scores_xgb_3j_2f_train = clf_xgb_3j_2f.predict_proba(dfc_X_MC_3j['train'])[:, 1]\n\ny_scores_xgb_3j_2f = y_scores_xgb_3j_2f_test\n\nprint(\"ROC for XGB-tag, test set \\t\\t\", roc_auc_score(dfc_y_MC_3j_2f['test'], y_scores_xgb_3j_2f_test))\n\n\ny_scores_xgb_3j_2f_test = pd.Series(y_scores_xgb_3j_2f_test, index=dfc_X_MC_3j['test'].index)\ny_scores_xgb_3j_2f_train = pd.Series(y_scores_xgb_3j_2f_train, index=dfc_X_MC_3j['train'].index)\ny_scores_xgb_3j_2f_all = y_scores_xgb_3j_2f_train.append(y_scores_xgb_3j_2f_test).sort_index()\n\ndfc_y_scores_xgb_3j = PandasContainer(y_scores_xgb_3j_2f_all, \n dict_flavor_MC_3j, dict_split_MC_3j)\n\n\n# histogram of b-tags for nnbjet (org) and for XGBoost \nif create_plots:\n \n fig_btag_hist, ax_btag_hist = plt.subplots(figsize=(12, 8))\n \n ax_btag_hist.hist(dfc_nnbjet_3j['b', 'test'], 100, range=(0, 1), histtype='step',\n density=True, label='nnbtag b_test', color=color_dict['blue'])\n ax_btag_hist.hist(dfc_nnbjet_3j['b', 'train'], 100, range=(0, 1), histtype='step',\n density=True, label='nnbtag b_train', color=color_dict['blue'], linestyle='dashed')\n \n ax_btag_hist.hist(dfc_nnbjet_3j['cl', 'test'], 100, range=(0, 1), histtype='step',\n density=True, label='nnbtag cl_test', color=color_dict['orange'])\n ax_btag_hist.hist(dfc_nnbjet_3j['cl', 'train'], 100, range=(0, 1), histtype='step',\n density=True, label='nnbtag cl_train', color=color_dict['orange'], linestyle='dashed')\n \n ax_btag_hist.hist(dfc_y_scores_xgb_3j['b', 'test'], 100, range=(0, 1), histtype='step',\n density=True, label='xgb b_test', color=color_dict['red'])\n ax_btag_hist.hist(dfc_y_scores_xgb_3j['b', 'train'], 100, range=(0, 1), histtype='step',\n density=True, label='xgb b_train', color=color_dict['red'], linestyle='dashed')\n \n ax_btag_hist.hist(dfc_y_scores_xgb_3j['cl', 'test'], 100, range=(0, 1), histtype='step',\n density=True, label='xgb cl_test', 
color=color_dict['green'])\n ax_btag_hist.hist(dfc_y_scores_xgb_3j['cl', 'train'], 100, range=(0, 1), histtype='step',\n density=True, label='xgb cl_train', color=color_dict['green'], linestyle='dashed')\n \n ax_btag_hist.set(xlabel='b-tag', ylabel='Normalized Counts', \n title='Histogram of b-tags for NN and XGB')\n \n ax_btag_hist.legend(loc='upper center')\n \n if close_figure_after_saving:\n plt.close('all')\n\n\n\n# roc curve for xgboost compared to nnbjet (org)\nif create_plots:\n \n fig_roc_curve, ax_roc_curve = plt.subplots(figsize=(12, 8))\n \n for name, y_btag, color in zip(['nnbtag', 'xgb'], \n [dfc_nnbjet_3j['test'], y_scores_xgb_3j_2f_test],\n ['blue', 'red']):\n \n fpr, tpr, thresholds = roc_curve(dfc_y_MC_3j_2f['test'], y_btag, pos_label=1)\n \n signal_eff = tpr\n background_rej = 1 - fpr\n # background_acc = 1 - background_rej\n \n roc_auc = auc(fpr, tpr)\n \n ax_roc_curve.plot(signal_eff, background_rej, color=color_dict[color], \n label=f'{name}, AUC = {roc_auc:0.4f})')\n \n ax_roc_curve.plot([0, 1], [0, 1], color=color_dict['k'], lw=1, linestyle='--')\n \n ax_roc_curve.set(xlim=[0.0, 1.0], ylim=[0.0, 1.05], \n xlabel='Signal Efficiency', ylabel='Background Rejection',\n title='ROC-curve')\n \n ax_roc_curve.legend(loc=\"lower right\")\n \n \n if save_plots:\n fig_roc_curve.savefig('./CM_figures/ROC_curve.pdf', dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n\n\n\n# learning curves for CV run and normal run for XGBoost\nif create_plots:\n \n fig_xgb_3j_2f_cv, ax_xgb_3j_2f_cv = plt.subplots(figsize=(12, 6))\n plot_cv_res(cvres_xgb_3j_2f, ax_xgb_3j_2f_cv, metrics_xgb, method='xgb', n_sigma=n_sigma)\n ax_xgb_3j_2f_cv.set(title='XGB: 3-jet, 2 flavours, CV run')\n if save_plots:\n fig_xgb_3j_2f_cv.savefig('./CM_figures/XGB_3jet_2flavor_CV_run.pdf', dpi=300)\n \n \n \n xgb_eval_3j_2f = clf_xgb_3j_2f.evals_result()\n fig_xgb_3j_2f_normal, ax_xgb_3j_2f_normal = plot_cv_test_results(\n xgb_eval_3j_2f, \n method = 'xgb',\n metrics = metrics_xgb,\n title = 'XGB: 3-jet, 2 flavours, normal run')\n if save_plots:\n fig_xgb_3j_2f_normal.savefig('./CM_figures/XGB_3jet_2flavor_normal_run.pdf', \n dpi=300)\n\n if close_figure_after_saving:\n plt.close('all')\n\n\n\n\n#%% ===========================================================================\n# LightGBM, 3-jet 2-flavour events \n# =============================================================================\n\nprint_headline(\"LightGBM\", start=\"\\n\\n\", end=\"\\n\\n\")\n\n# define training matrix\ndftrainLGB_3j_2f = lightgbm.Dataset(data = dfc_X_MC_3j['train'].values, \n label = dfc_y_MC_3j_2f['train'].values, \n feature_name = dfc_X_MC_3j['train'].columns.tolist(),\n silent=True)\n\n\n\nfilename_cvres_lgb_3j_2f = f'{progress_folder}cvres_lgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\n\nif do_load_filename(filename_cvres_lgb_3j_2f, force_rerun): \n\n #load saved model\n cvres_lgb_3j_2f = load_model(filename_cvres_lgb_3j_2f)\n\n\nelse:\n \n print(\"Running cross validation with early stopping on LGB\")\n\n with MyTimer(\"k-fold CV, LGB\", cpu_n_jobs) as t: \n # run k-fold CV with LGB\n cvres_lgb_3j_2f = pd.DataFrame(lightgbm.cv(\n lgb_params,\n dftrainLGB_3j_2f,\n \n num_boost_round = num_boost_round,\n early_stopping_rounds = early_stopping_rounds,\n \n nfold = n_fold,\n stratified = True,\n shuffle = True,\n metrics = metrics_lgb,\n \n seed = 42,\n verbose_eval = verbose_eval,\n callbacks = [lgb_cv_early_stopping],\n ))\n\n #save model\n save_model(cvres_lgb_3j_2f, filename_cvres_lgb_3j_2f)\n\n\n\n# get best 
result:\nN_est_cv_best_lgb_3j_2f = cvres_lgb_3j_2f['auc-mean'].values.argmax()\nAUC_best_mean_lgb_3j_2f, AUC_best_std_lgb_3j_2f = cvres_lgb_3j_2f.loc[\n cvres_lgb_3j_2f.index[N_est_cv_best_lgb_3j_2f], \n ['auc-mean', 'auc-stdv']]\n\nprint(f\"Best number of estimators based on cross validation: {N_est_cv_best_lgb_3j_2f}\")\nstr_lgb_3j_2f = (f\"ROC for LGB-tag, {n_fold}-fold CV set \\t\\t \" + \n f\"{AUC_best_mean_lgb_3j_2f:.5f} +/- {AUC_best_std_lgb_3j_2f:.5f}\")\nprint(str_lgb_3j_2f)\n\n\n# fit normal lgb classfier using the best number of estimators from CV\nclf_lgb_3j_2f = copy(clf_org_lgb)\nclf_lgb_3j_2f.set_params(n_estimators=N_est_cv_best_lgb_3j_2f)\n\n\n\n\n\n\n#%% LGB: Random Search\n\n\nfilename_rs_lgb_3j_2f = f'{progress_folder}rs_lgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\nfilename_rs_lgb_3j_2f_res = f'{progress_folder}rs_lgb_3j_2f_res_{str_vars}_{int(down_sample*100)}_percent'\n\n\nif do_load_filename(filename_rs_lgb_3j_2f, force_rerun): \n\n #load saved model\n rs_lgb_3j_2f = load_model(filename_rs_lgb_3j_2f)\n rs_lgb_3j_2f_res = load_model(filename_rs_lgb_3j_2f_res)\n\nelse:\n \n print(\"\\nRunning Random Search on LGB\\n\")\n \n \n # run randomized grid search\n rs_lgb_3j_2f = RandomizedSearchCV(clf_lgb_3j_2f, \n param_distributions = params_random_search_lgb, \n n_iter = n_iter_RS, \n scoring = 'roc_auc', \n n_jobs = 1, \n cv = n_fold,\n verbose = 2, \n random_state = 42,\n return_train_score = True,\n refit = True)\n\n with MyTimer(\"Random Search, LGB\", cpu_n_jobs) as t: \n rs_lgb_3j_2f.fit(dfc_X_MC_3j['train'], dfc_y_MC_3j_2f['train'], verbose=-1)\n \n rs_lgb_3j_2f_res = pd.DataFrame(rs_lgb_3j_2f.cv_results_)\n rs_lgb_3j_2f_res = rs_lgb_3j_2f_res.loc[:, cols_to_use].sort_values(by='rank_test_score')\n \n \n\n #save model\n save_model(rs_lgb_3j_2f, filename_rs_lgb_3j_2f)\n save_model(rs_lgb_3j_2f_res, filename_rs_lgb_3j_2f_res)\n print(\"\\n\")\n\n\nclf_lgb_3j_2f = rs_lgb_3j_2f.best_estimator_\n\n\nprint(rs_lgb_3j_2f.best_params_)\nprint(rs_lgb_3j_2f_res.loc[:, ['mean_test_score', 'std_test_score']].iloc[0, :])\n\n\n# LGB: Results of Random Search\nif not is_hep:\n plot_random_search(rs_lgb_3j_2f_res, n_fold, f'LGB 3j 2f, {int(down_sample*100)}% data', ylim=(0.79, 0.81))\n\n\n#%% LGB fitting final model\n\nfilename_clf_lgb_3j_2f = f'{progress_folder}clf_lgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\nif do_load_filename(filename_clf_lgb_3j_2f, force_rerun): \n\n #load saved model\n clf_lgb_3j_2f = load_model(filename_clf_lgb_3j_2f)\n\n\nelse:\n \n print(\"\\nFitting LGB\\n\")\n \n with MyTimer(\"Fitting training data, LGB\", cpu_n_jobs) as t: \n clf_lgb_3j_2f.fit(dfc_X_MC_3j['train'], dfc_y_MC_3j_2f['train'], \n eval_metric=metrics_lgb, \n verbose = False,\n eval_set = [((dfc_X_MC_3j['train'], dfc_y_MC_3j_2f['train'])), \n (dfc_X_MC_3j['test'], dfc_y_MC_3j_2f['test'])],\n eval_names = ['train', 'test'],\n )\n #save model\n save_model(clf_lgb_3j_2f, filename_clf_lgb_3j_2f)\n\n\n# predict b-tags and scores\ny_pred_lgb_3j_2f = clf_lgb_3j_2f.predict(dfc_X_MC_3j['test'])\ny_scores_lgb_3j_2f_test = clf_lgb_3j_2f.predict_proba(dfc_X_MC_3j['test'])[:, 1]\ny_scores_lgb_3j_2f_train = clf_lgb_3j_2f.predict_proba(dfc_X_MC_3j['train'])[:, 1]\n\ny_scores_lgb_3j_2f = y_scores_lgb_3j_2f_test\n\n\nprint(\"ROC for LGB-tag, test set \\t\\t\", roc_auc_score(dfc_y_MC_3j_2f['test'],\n y_scores_lgb_3j_2f))\n\n\ny_scores_lgb_3j_2f_test = pd.Series(y_scores_lgb_3j_2f_test, index=dfc_X_MC_3j['test'].index)\ny_scores_lgb_3j_2f_train = pd.Series(y_scores_lgb_3j_2f_train, 
index=dfc_X_MC_3j['train'].index)\ny_scores_lgb_3j_2f_all = y_scores_lgb_3j_2f_train.append(y_scores_lgb_3j_2f_test).sort_index()\n\ndfc_y_scores_lgb_3j = PandasContainer(y_scores_lgb_3j_2f_all, \n dict_flavor_MC_3j, dict_split_MC_3j)\n\n\n\n#%%\n\n# learning curves for CV run and normal run for LightGBM\nif create_plots:\n\n fig_lgb_3j_2f_cv, ax_lgb_3j_2f_cv = plt.subplots(figsize=(12, 6))\n plot_cv_res(cvres_lgb_3j_2f, ax_lgb_3j_2f_cv, metrics_lgb, method='lgb', n_sigma = n_sigma)\n ax_lgb_3j_2f_cv.set(title='LGB: 3-jet, 2 flavours, CV run')\n\n\n if save_plots:\n fig_lgb_3j_2f_cv.savefig('./CM_figures/LGB_3jet_2flavor_CV_run.pdf', dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n\n\n\n lgb_eval_3j_2f = clf_lgb_3j_2f.evals_result_\n fig_lgb_3j_2f_normal, ax_lgb_3j_2f_normal = plot_cv_test_results(\n lgb_eval_3j_2f, \n method='lgb',\n metrics = metrics_lgb,\n title = 'LGB: 3-jet, 2 flavours, normal run')\n\n if save_plots:\n fig_lgb_3j_2f_normal.savefig('./CM_figures/LGB_3jet_2flavor_normal_run.pdf', \n dpi=300)\n if close_figure_after_saving:\n plt.close('all')\n\n\n\n#%%\n\nprint_headline(\"Pearson and Spearman correlations\")\n \nfrom scipy.stats import rankdata, pearsonr, spearmanr\n\nif not is_hep:\n \n fig, ax = plt.subplots(figsize=(16, 8))\n ax.scatter(y_scores_xgb_3j_2f, y_scores_lgb_3j_2f, s=4)\n ax.set(xlabel='XGB', ylabel='LGB', title='Proba')\n\ncorr_pearson = pearsonr(y_scores_xgb_3j_2f, y_scores_lgb_3j_2f)\ncorr_spearman = spearmanr(y_scores_xgb_3j_2f, y_scores_lgb_3j_2f)\n\nprint(\"corr_pearson\", corr_pearson)\nprint(\"corr_spearman\", corr_spearman)\n\n\nif not is_hep:\n # ranking: small values get low rank\n fig, ax = plt.subplots(figsize=(16, 8))\n ax.scatter(rankdata(y_scores_xgb_3j_2f), rankdata(y_scores_lgb_3j_2f), s=4)\n ax.set(xlabel='XGB', ylabel='LGB', title='Rank of proba')\n\n\n\n\n#%% SHAP Values\n\nprint_headline(\"SHAP Values\")\n\n# explain the model's predictions using SHAP values\n# (same syntax works for LightGBM, CatBoost, and scikit-learn models)\n\nexplainer_xgb_3j_2f = shap.TreeExplainer(clf_xgb_3j_2f)\nexplainer_lgb_3j_2f = shap.TreeExplainer(clf_lgb_3j_2f)\n\n\nfilename_shap_values_xgb_3j_2f = f'{progress_folder}shap_values_xgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\nfilename_shap_values_lgb_3j_2f = f'{progress_folder}shap_values_lgb_3j_2f_{str_vars}_{int(down_sample*100)}_percent'\n\n\nif do_load_filename(filename_shap_values_xgb_3j_2f, force_rerun): \n shap_values_xgb_3j_2f = load_model(filename_shap_values_xgb_3j_2f)\n shap_values_lgb_3j_2f = load_model(filename_shap_values_lgb_3j_2f)\n\nelse:\n \n with MyTimer(\"Calculating SHAP values, XGB\", cpu_n_jobs) as t: \n shap_values_xgb_3j_2f = explainer_xgb_3j_2f.shap_values(dfc_X_MC_3j['test'])\n with MyTimer(\"Calculating SHAP values, LGB\", cpu_n_jobs) as t: \n shap_values_lgb_3j_2f = explainer_lgb_3j_2f.shap_values(dfc_X_MC_3j['test'])\n\n save_model(shap_values_xgb_3j_2f, filename_shap_values_xgb_3j_2f)\n save_model(shap_values_lgb_3j_2f, filename_shap_values_lgb_3j_2f) # TODO\n\n\nif create_shap_plot:\n \n for shap_values, name in zip([shap_values_xgb_3j_2f, shap_values_lgb_3j_2f], \n ['XGB', 'LGB']):\n \n \n # create a SHAP dependence plot to show the effect of a single feature across the whole dataset\n plt.figure()\n shap.dependence_plot(\"projet\", shap_values, dfc_X_MC_3j['test'])\n plt.title(name)\n \n # dependence plot with a specific interaction index: here itself.\n plt.figure()\n shap.dependence_plot(\"projet\", shap_values, dfc_X_MC_3j['test'], \n show=True, 
interaction_index=\"projet\")\n plt.title(name)\n \n # summarize the effects of all the features\n plt.figure()\n shap.summary_plot(shap_values, dfc_X_MC_3j['test'])\n plt.title(name)\n \n # one-dimensional summary of all feature importances\n plt.figure()\n shap.summary_plot(shap_values, dfc_X_MC_3j['test'], plot_type=\"bar\")\n plt.title(name)\n \n # the numerical calculation of above plot is:\n shap_values_df = pd.DataFrame(shap_values, columns = dfc_X_MC_3j['test'].columns)\n shap_feature_importance = shap_values_df.abs().mean(0)\n \n\n\n\n\n\n\n# =============================================================================\n# Step 2! Check events\n# =============================================================================\n\n# tests that dfc_y_scores_lgb_3j and df_MC_3j (qmatch) has the same indices \n# and both are monotonically increasing i.e. exactly the same order\nassert dfc_y_scores_lgb_3j.df.index.equals(df_MC_3j.index)\nassert dfc_y_scores_lgb_3j.df.index.is_monotonic_increasing\nassert df_MC_3j.index.is_monotonic_increasing\n\nb_tags_pr_event_3j_lgb = dfc_y_scores_lgb_3j.df.values.reshape((-1, 3))\ndf_b_tags_pr_event_3j_lgb = pd.DataFrame(b_tags_pr_event_3j_lgb, \n index=dfc_y_scores_lgb_3j.df.index[::3],\n columns=['1st', '2nd', '3rd'])\n\nqmatch_pr_event_3j_lgb = df_MC_3j.qmatch.values.reshape((-1, 3))\ndf_qmatch_pr_event_3j_lgb = pd.DataFrame(qmatch_pr_event_3j_lgb, \n index=df_MC_3j.index[::3],\n columns=['1st', '2nd', '3rd'])\n\n\n\n\n\n\n\n\ndfc_y_scores_lgb_3j\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"CM_logs/2018.10.29_17.07.07__CM_g_tagging.py","file_name":"2018.10.29_17.07.07__CM_g_tagging.py","file_ext":"py","file_size_in_byte":35363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"229204851","text":"'''\r\nCreated on 29 ene. 
2019\r\nMovil class that inherits from Terminal\r\n@author: d18momoa\r\n'''\r\nfrom ejerciciosHerencia.Terminal import Terminal\r\nclass Movil(Terminal):\r\n    def __init__(self,num,tarifa):\r\n        super().__init__(num)\r\n        self.__costo = 0\r\n        if(tarifa == \"rata\" or tarifa == \"RATA\" or tarifa == \"mono\" or tarifa == \"MONO\" or tarifa == \"bisonte\" or tarifa == \"BISONTE\" ):\r\n            self.__tarifa = tarifa\r\n        else:\r\n            print(\"Esa tarifa no existe.\")\r\n    \r\n    def llama(self,m2,tiempo):\r\n        if (self.__tarifa == \"rata\" or self.__tarifa == \"RATA\"):\r\n            self.__costo += (0.06 * ((tiempo/100)))\r\n        elif (self.__tarifa == \"mono\" or self.__tarifa == \"MONO\"):\r\n            self.__costo += (0.12 * ((tiempo/100)))\r\n        elif (self.__tarifa == \"bisonte\" or self.__tarifa == \"BISONTE\"):\r\n            self.__costo += (0.30 * ((tiempo/100))) \r\n        else:\r\n            print(\"Error al realizar la llamada.\")\r\n        super().llama(m2, tiempo)\r\n    def __str__(self):\r\n        cadena = super().__str__()\r\n        return cadena + \", tarificados \"+str(round(self.__costo,2))+\" euros\"\r\nif __name__ == '__main__':\r\n    m1 = Movil(\"678 11 22 33\",\"rata\");\r\n    m2 = Movil(\"644 74 44 69\",\"mono\");\r\n    m3 = Movil(\"622 32 89 09\",\"bisonte\");\r\n    print(m1);\r\n    print(m2);\r\n    m1.llama(m2, 320);\r\n    m1.llama(m3, 200);\r\n    m2.llama(m3, 550);\r\n    print(m1);\r\n    print(m2);\r\n    print(m3);","sub_path":"ejerciciosHerencia/Movil.py","file_name":"Movil.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"32939681","text":"# THIS FILE IS FINAL\n\n\"\"\" EE 250L Lab 02: GrovePi Sensors\n\nList team members here.\nMingyu Cui\nJui Po Hung\n\nInsert Github repository link here.\ngit@github.com:usc-ee250-fall2018/GrovePi-robert.git\n\nEach team member should submit a copy of the team's code.\n\"\"\"\n\n\"\"\"python3 interpreters in Ubuntu (and other linux distros) will look in a \ndefault set of directories for modules when a program tries to `import` one. \nExamples of some default directories are (but not limited to):\n    /usr/lib/python3.5\n    /usr/local/lib/python3.5/dist-packages\n\nThe `sys` module, however, is a builtin that is written in and compiled in C for\nperformance. Because of this, you will not find this in the default directories.\n\"\"\"\n\nimport sys\n# By appending the folder of all the GrovePi libraries to the system path here,\n# we can successfully `import grovepi`\nsys.path.append('../../Software/Python/')\nimport grovepi\nimport time\nimport math\n\n# This append is to support importing the LCD library.\nsys.path.append('../../Software/Python/grove_rgb_lcd')\nimport grove_rgb_lcd\n\ngrove_rgb_lcd.setRGB(0,0,255) # set to blue so it looks better\n\n\"\"\"\nGrove Ultrasonic Ranger: D-4\nGrove Rotary Angle Sensor: A-0\nGrove LCD RGB Backlight: I2C-1\nGrove Temperature & Humidity Sensor: D-5\n\"\"\"\n\n\ndht_sensor_port = 5 # connect the DHT sensor to port D-5\ndht_sensor_type = 0 # use 0 for the blue-colored sensor and 1 for the white-colored sensor\n\nultrasonic_ranger = 4 # connect to port D-4\n\npotentiometer = 0 # connect to port A-0\ngrovepi.pinMode(potentiometer,\"INPUT\")\n\n\"\"\"This if-statement checks if you are running this python file directly. 
That \nis, if you run `python3 grovepi_sensors.py` in terminal, this if-statement will \nbe true\"\"\"\nif __name__ == '__main__':\n\n\t\n\n\n\n\twhile True:\n\t\ttry:\n\t\t\t#So we do not poll the sensors too quickly which may introduce noise,\n\t\t\t#sleep for a reasonable time of 1 second between each iteration.\n\t\t\ttime.sleep(1)\n\n\t\t\t# Read value from range sensor\n\t\t\tranger = grovepi.ultrasonicRead(ultrasonic_ranger)\n\n\t\t\t# Read value from potentiometer\n\t\t\tthreshold = grovepi.analogRead(potentiometer)\n\n#\t\t\tprint(\"threshold = %d ranger = %d\" %(threshold, ranger))\n\n\t\t\t# compare ranger and threshold\n\t\t\tinbound = \" \"\n\n\t\t\tif ranger <= threshold:\n\t\t\t\tinbound = \"OBJ PRESENT\"\n\n\t\t\t# get the temperature and Humidity from the DHT sensor\n\t\t\t[ temp,hum ] = grovepi.dht(dht_sensor_port,dht_sensor_type)\n#\t\t\tprint(\"temp = %d humidity = %d%%\" %(temp, hum))\n\n\t\t\t# check if we have nans\n\t\t\t# if so, then raise a type error exception\n\t\t\tif math.isnan(temp) is True or math.isnan(hum) is True:\n\t\t\t\traise TypeError('nan error')\n\n\t\t\t# write all data to LCD\n\t\t\tgrove_rgb_lcd.setText_norefresh(\"%4d %s\\n%4dcm %3d%% %3dC\" %(threshold, inbound, ranger, hum, temp))\n\n\n\t\texcept TypeError as e:\n\t\t\tprint(str(e))\n\t\texcept KeyboardInterrupt:\n\t\t\tprint (\"KeyboardInterrupt\")\n\t\t\tbreak\n\t\texcept IOError:\n\t\t\tprint (\"IOError\")\n","sub_path":"ee250/lab02/grovepi_sensors.py","file_name":"grovepi_sensors.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"536333078","text":"# Problem Statement \r\n\r\n# We have to swap find the minimum swaps to balance a paranthesis\r\n# [] ][][ ---- 2 \r\n# [][][] ----- 0 \r\n#[[[]]]][ ------ 1 \r\n\r\n\r\n# 1 . 
[] ---- 0 \r\n# 2 ][ ---- 1\r\n# 3 [[]] ---- 0 \r\n# 4 [] [ ] ---- 0 \r\n# 5 ][][ -----2 \r\n# 6 ]][[ -----3 \r\n\r\n\r\ndef minimumswaps(str):\r\n\tleftcount = 0\r\n\trightcount = 0\r\n\tswaps = 0\r\n\timbalance = 0 \r\n\r\n\tfor j in list(str):\r\n\t\tif j == '[':\r\n\t\t\tleftcount +=1\r\n\t\t\tif imbalance > 0 :\r\n\t\t\t\tswaps = swaps + imbalance\r\n\t\t\t\timbalance -=1\r\n\r\n\t\tif j ==']':\r\n\t\t\trightcount +=1\r\n\t\t\timbalance = rightcount - leftcount\t\t \r\n\r\n\treturn swaps\r\n\r\nprint(minimumswaps(\"][[]\"))\r\n\r\n\r\n","sub_path":"GreedyAlgorithms/MinimumSwapsForBracketBalancing.py","file_name":"MinimumSwapsForBracketBalancing.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"328465337","text":"# coding:utf-8\n# -*- coding:utf-8 -*-\n\nimport time\nfrom datetime import datetime, timedelta\nimport re\nfrom hdfs import client\nfrom logConfig import logger\nimport os\nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\n\n\n# class SparkSessionBase(object):\n# def __init__(self):\n# self.spark_appName = \"Spark_job\"\n# self.spark_master = \"yarn\"\n# self.spark_deploy_mode = 'client'\n# self.spark_executor_memory = \"10g\"\n# self.spark_num_executors = 14\n# self.spark_executor_cores = 4\n# self.spark_driver_memory = '1g'\n# self.spark_driver_python = \"/data/search/envspark/bin/python3\"\n# self.spark_pyspark_python = \"/data/search/envspark/bin/python3\"\n# self.spark_jars_1 = \"/data/search/spark/jars/bson-3.4.2.jar\"\n# self.spark_jars_2 = \"/data/search/spark/jars/mongo-spark-connector_2.11-2.1.1.jar\"\n#\n# def create_sparkSession(self):\n# conf = SparkConf()\n# configs = (\n# (\"spark.app.name\", self.spark_appName),\n# (\"spark.master\", self.spark_master),\n# (\"spark.submit.deployMode\", self.spark_deploy_mode),\n# (\"spark.executor.memory\", self.spark_executor_memory),\n# (\"spark.executor.instances\", self.spark_num_executors),\n# (\"spark.executor.cores\", self.spark_executor_cores),\n# (\"spark.driver.memory\", self.spark_driver_memory),\n# (\"spark.pyspark.driver.python\", self.spark_driver_python),\n# (\"spark.pyspark.python\", self.spark_pyspark_python),\n# (\"spark.jars\", self.spark_jars_1),\n# (\"spark.jars\", self.spark_jars_2))\n#\n# conf.setAll(configs)\n# spark = SparkSession.builder.config(conf=conf).getOrCreate()\n#\n# return spark\n\n\n\nclass Timer():\n def __init__(self):\n self.startTime = None\n self.endTime = None\n self.costTime = None\n\n\n def start(self):\n self.startTime = time.time()\n\n # def stop(self):\n # self.endTime = time.time()\n # logger.info('Time taken: {} ms'.format(round((self.endTime - self.startTime)*1000)))\n\n def cost(self):\n self.endTime = time.time()\n self.costTime = round((self.endTime - self.startTime)*1000)\n return self.costTime\n\ndef delete_before2_localData(fileName, params):\n before2_dateStr1 = datetime.strftime(params[\"generateDate\"] - timedelta(days=2), \"%Y%m%d\")\n pattern = re.compile(r'\\d{8}')\n before2_fileName = re.sub(pattern, before2_dateStr1, fileName)\n if os.path.exists(before2_fileName):\n os.remove(before2_fileName)\n logger.info(\"====\\\"{}\\\" delete finished ====\".format(before2_fileName))\n\n\ndef delete_before2_sparkData(fileName, params):\n clientHdfs = client.InsecureClient(params[\"hdfsHost\"], user=\"search\")\n before2_dateStr1 = datetime.strftime(params[\"generateDate\"] - timedelta(days=2), \"%Y%m%d\")\n pattern = re.compile(r'\\d{8}')\n before2_fileName = 
re.sub(pattern, before2_dateStr1, fileName)\n if before2_fileName in clientHdfs.list(os.path.dirname(fileName)):\n clientHdfs.delete(before2_fileName, recursive=True)\n logger.info(\"====\\\"{}\\\" delete finished ====\".format(before2_fileName))\n\ndef upload_to_hdfs(localFileName, sparkDirName, params):\n clientHdfs = client.InsecureClient(params[\"hdfsHost\"], user=\"search\")\n if sparkDirName.split('/')[-1] in clientHdfs.list(os.path.dirname(sparkDirName)):\n clientHdfs.delete(sparkDirName, recursive=True)\n clientHdfs.upload(sparkDirName, localFileName)\n logger.info(\"====\\\"{}\\\" upload to HDFS finished====\".format(localFileName.split('/')[-1]))\n delete_before2_sparkData(sparkDirName, params)","sub_path":"predict-2019/train_ticket_predict/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"628928558","text":"import random\nimport operator\nimport matplotlib\nimport tkinter\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot\nimport matplotlib.animation \nimport agentframework\nimport csv\nimport Wolf\n\n\n#creating environments\nenvironment=[]\nrowlist=[\"rowlist\"]\n\nwith open('in.txt') as f:\n for row in f:\n parsed_line = str.split(row,\",\")\n rowlist=[]\n for coordinate in parsed_line:\n rowlist.append(float(coordinate))\n environment.append(rowlist)\n \n\n#Agent Parameters\nnum_of_agents = 50\n#num_of_iterations = 100\nneighbourhood = 10\nagents = []\n#Wolf Parameters\nnum_of_wolf = 1\nneighbourhood_wolf = 10\nwolf = []\n\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\n\n# Make the agents.\nfor i in range(num_of_agents):\n agents.append(agentframework.Agent(environment, agents))\nfor j in range(num_of_wolf):\n agents.append(Wolf.wolf(environment, wolf))\n\ncarry_on = True\t\n\n#Agents Move, Eat and Share\n\t\ndef update(frame_number):\n \n fig.clear() \n global carry_on\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat ()\n agents[i].share_with_neighbours(neighbourhood)\n wolf[j].attack()\n wolf[j].wolf_move()\n#Agent stopping condition, I.E wolf attack and kill function\ndef attack(self, neighbourhood_wolf):\n for agent in self.agents, self.wolf:\n attack = self.distance_between_wolf(agent)\n if attack <= neighbourhood:\n random.random <= 0\n agent, carry_on = False\n\n\n \n \n#plotting agents\n for i in range(num_of_agents, num_of_wolf):\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y)\n matplotlib.pyplot.scatter(wolf[j].wolfx, wolf[j].wolfy)\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n\n#Changing the Stopping Parameters\ndef gen_function(b = [0]):\n a = 0\n global carry_on #Not actually needed as we're not assigning, but clearer\n while (a < 100)& (carry_on) : \n yield a\t\t\t# Returns control and waits next call.\n a = a + 1\n \n#Animating The Agents\n\n#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n\n#Running the Model\ndef run():\n animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n canvas.draw()\n\n \n \n# Creating General User Interface\nroot = tkinter.Tk()\nroot.wm_title(\"Model\")\nmenubar = tkinter.Menu(root)\nroot.config(menu=menubar)\nmodel_menu = tkinter.Menu(menubar)\nmenubar.add_cascade(label=\"Model\", 
menu=model_menu)\nmodel_menu.add_command(label=\"Run model\", command=run, state=\"normal\") \ncanvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)\ncanvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\ntkinter.mainloop()\n","sub_path":"Animation.py","file_name":"Animation.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"381837778","text":"\n\nfrom xai.brain.wordbase.nouns._personality import _PERSONALITY\n\n#class header\nclass _PERSONALITIES(_PERSONALITY, ):\n\tdef __init__(self,): \n\t\t_PERSONALITY.__init__(self)\n\t\tself.name = \"PERSONALITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"personality\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_personalities.py","file_name":"_personalities.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"37630155","text":"import numpy as np\n\nraw_data = [ [1, 3], [2, 4], [3, 5], [4, 6], [5, 7] ]\nx_data = []\nt_data = []\n \ndef data_slicing(data):\n    global x_data, t_data\n    for i in data:\n        x_data.append(i[0])\n        t_data.append(i[1])\n\n    x_data = np.array(x_data).reshape(len(data),1)\n    t_data = np.array(t_data).reshape(len(data),1)\ndata_slicing(raw_data)\n#print(x_data)\n\nW = np.random.rand(1,1) # random value in [0, 1) # 2-D, ex)[[0.35259878]]\nb = np.random.rand(1) # random value in [0, 1) # 1-D, ex)[0.7782744]\n\ndef loss_func(x, t): # loss function\n    y = np.dot(x,W) + b # y = Wx + b, written as a matrix product\n    return ( np.sum( (t - y)**2 ) ) / ( len(x) )\n\n    # numerical differentiation; used to differentiate the loss function to find where it changes least\n    # i.e. the result of a very small change in the input\ndef numerical_derivative(f, x):\n    delta_x = 1e-4 # 0.0001 \n    grad = np.zeros_like(x)\n    \n    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n    \n    while not it.finished:\n        idx = it.multi_index \n        tmp_val = x[idx]\n        x[idx] = float(tmp_val) + delta_x\n        fx1 = f(x) # f(x+delta_x)\n        \n        x[idx] = tmp_val - delta_x \n        fx2 = f(x) # f(x-delta_x)\n        grad[idx] = (fx1 - fx2) / (2*delta_x)\n        \n        x[idx] = tmp_val \n        it.iternext()   \n        \n    return grad\n\n\n# computes the loss function value\n# input variables x, t : numpy type\ndef error_val(x, t): # same as loss_func, but kept separate to make its role explicit\n    y = np.dot(x,W) + b\n    \n    return ( np.sum( (t - y)**2 ) ) / ( len(x) )\n\n# after training, predicts a value for arbitrary input data\n# input variable x : numpy type\ndef predict(x):\n    y = np.dot(x,W) + b\n    \n    return y\n\n\nlearning_rate = 1e-2  # if it diverges, rerun with 1e-3 ~ 1e-6, etc.\n\nf = lambda x : loss_func(x_data,t_data)\nprint(\"Initial error value = \", error_val(x_data, t_data), \"Initial W = \", W, \"\\n\", \", b = \", b )\n\nfor step in range(8001):  \n    W -= learning_rate * numerical_derivative(f, W)\n    b -= learning_rate * numerical_derivative(f, b)\n    \n    if (step % 400 == 0):\n        print(\"step = \", step, \"error value = \", error_val(x_data, t_data), \"W = \", W, \", b = \",b )\n\nprint(predict(43))\n","sub_path":"linear_RegressionEX.py","file_name":"linear_RegressionEX.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"366996598","text":"import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass SAModule(nn.Module):\r\n    def __init__(self, channels, reduction, act_layer, num_attention_heads=1):\r\n        super(SAModule, self).__init__()\r\n        self.all_head_size = channels // reduction\r\n        self.num_attention_heads = num_attention_heads\r\n        
self.attention_head_size = int(self.all_head_size / self.num_attention_heads)\r\n\r\n self.query = nn.Conv2d(\r\n channels, self.all_head_size, kernel_size=1, padding=0,\r\n )\r\n self.key = nn.Conv2d(\r\n channels, self.all_head_size, kernel_size=1, padding=0,\r\n )\r\n self.value = nn.Conv2d(\r\n channels, self.all_head_size, kernel_size=1, padding=0,\r\n )\r\n self.output = nn.Conv2d(\r\n self.all_head_size, channels, kernel_size=1, padding=0,\r\n )\r\n self.gamma = nn.Parameter(torch.FloatTensor([0.]))\r\n if act_layer is not None:\r\n self.act = act_layer(inplace=True)\r\n else:\r\n self.act = None\r\n\r\n def forward(self, x):\r\n query_layer = self.query(x)\r\n key_layer = self.key(x)\r\n value_layer = self.value(x)\r\n\r\n # single-headed attention\r\n attention_scores = torch.matmul(\r\n key_layer.view(x.size(0), self.all_head_size, -1).transpose(-1, -2),\r\n query_layer.view(x.size(0), self.all_head_size, -1))\r\n attention_scores = attention_scores / math.sqrt(self.all_head_size)\r\n attention_probs = F.softmax(attention_scores, dim=-1)\r\n context_layer = torch.matmul(\r\n value_layer.view(x.size(0), self.all_head_size, -1),\r\n attention_probs)\r\n context_layer = context_layer.view(\r\n x.size(0), self.all_head_size, x.size(-2), x.size(-1))\r\n output_layer = self.output(context_layer)\r\n x = x + output_layer * self.gamma\r\n if self.act is not None:\r\n x = self.act(x)\r\n return x\r\n","sub_path":"cvcore/modeling/self_attention.py","file_name":"self_attention.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"240243004","text":"import random\nimport threading\nfrom cls import cls\n\nanzahl = 0\n\ndef zufall_wort(w):\n wort = w\n liste = []\n zufallswort = \"\"\n for x in wort:\n liste.append(x)\n laenge = len(liste)\n for x in range(laenge):\n zufallsbuchstabe = random.choice(liste)\n pos = liste.index(zufallsbuchstabe)\n del(liste[pos])\n zufallswort += zufallsbuchstabe\n return zufallswort\n\na = True\n\n\ndef stop():\n global a\n input()\n a = False\n\n\nbenutzerwort = \"Thorsten\"\n\nwortliste = [benutzerwort]\nhinzu = True\nanzeige = 1\nkomanzeige = 1\ni = 100\nthreading._start_new_thread(stop, () )\nwhile a == True:\n hinzu = True\n neueswort = zufall_wort(benutzerwort)\n komanzeige += 1\n for x in wortliste:\n if x == neueswort:\n hinzu = False\n if hinzu == True:\n wortliste.append(neueswort)\n anzeige += 1\n if komanzeige == i:\n cls()\n print(\"hz W:\", anzeige, \"al W:\", komanzeige)\n i += 100\n\ncls()\n\nprint(wortliste)\nprint(len(wortliste))\nprint(komanzeige)\n\nwortliste.sort(key=lambda x: \"\".join(x).lower())\n\n\ndateihandler = open(\"0.txt\", mode='w')\n\nfor x in wortliste:\n dateihandler.write(x + \"\\n\")\n","sub_path":"python/ideen/anzahlwoerter.py","file_name":"anzahlwoerter.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"354685671","text":"import matplotlib.pyplot as plt\nimport sqlite3\nimport numpy as np\nimport matplotlib.patches as mpatches\nfrom graphs_common import RESTAURANTS_DB\n\n'''\nHorizontal Bar Chart Price breakdown by Grade\n'''\ndef prices():\n db = sqlite3.connect(RESTAURANTS_DB)\n cursor = db.cursor()\n\n price_counts = '''\n SELECT\n price,\n count(1) as ct\n FROM\n restaurants\n WHERE price IS NOT NULL AND health_grade IS ?\n GROUP BY price\n ORDER BY price asc;\n '''\n\n grade_distros = [('A', []), ('B', []), ('C', [])]\n for 
val in grade_distros:\n prices = cursor.execute(price_counts, (val[0],)).fetchall()\n assert len(prices) == 4\n data_pts = float(sum([p[1] for p in prices]))\n for p in prices:\n val[1].append(p[1]/data_pts * 100)\n\n idx = np.arange(3)\n height = .8\n\n price_abc = list(zip(grade_distros[0][1], grade_distros[1][1], grade_distros[2][1]))\n bottom = (0,0,0)\n colors = {1: '#FF0000', 2: '#FF7700', 3: '#FFFF00', 4: '#7FFF00', 5: '#03a503'}\n handles = []\n for s in range(0, len(list(price_abc))):\n plt.barh(idx, price_abc[s], height, left=bottom, color=colors[s+1])\n bottom = [sum(x) for x in zip(bottom, price_abc[s])]\n handles.append(mpatches.Patch(color=colors[s+1], label='{}'.format('\\\\$'*(s+1))))\n db.close()\n\n \n plt.legend(handles=handles, title=\"Price\")\n plt.title('Yelp Price vs Health Grade')\n plt.xlabel('Percent with Price')\n plt.ylabel('Grade')\n plt.yticks(idx, [\"A\", \"B\", \"C\"])\n plt.xticks(np.arange(0, 110, 10))\n plt.show()\n\nif __name__ == '__main__':\n prices()","sub_path":"graphs/prices_vs_health_score.py","file_name":"prices_vs_health_score.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"387724440","text":"from django.urls import path, include, re_path\r\nfrom .views import (\r\n WordsView,\r\n DteailView,\r\n CreateWordView,\r\n EditView,\r\n AddMeaningView,\r\n SelectMeaningAddExampleView,\r\n AddExamleView,\r\n EditMeaningView,\r\n SelectMeaningEditView,\r\n DeleteWordView,\r\n SelectMeaningDeleteView,\r\n DeleteMeaningView,\r\n)\r\n\r\napp_name = 'words'\r\n\r\nurlpatterns = [\r\n path('', WordsView.as_view(), name = 'words_list'),\r\n path('createw/', CreateWordView.as_view(), name='create_view'),\r\n re_path(r'^meanings/(?P\\d+)$',AddExamleView.as_view(), name='addexample'),\r\n re_path(r'^meanings/(?P\\d+)/edit$',EditMeaningView.as_view(), name='editmeaning'),\r\n re_path(r'^meanings/(?P\\d+)/delete$',DeleteMeaningView.as_view(), name='delmeaning'),\r\n re_path(r'^(?P[\\w-]+)/edit$', EditView.as_view(), name='edit'),\r\n re_path(r'^(?P[\\w-]+)/addmeaning$',AddMeaningView.as_view(), name='addmeaning'),\r\n re_path(r'^(?P[\\w-]+)/selectm$',SelectMeaningAddExampleView.as_view(), name='selectmeaning'),\r\n re_path(r'^(?P[\\w-]+)/selectmedit$',SelectMeaningEditView.as_view(), name='selectmeaningedit'),\r\n re_path(r'^(?P[\\w-]+)/selectmdel$',SelectMeaningDeleteView.as_view(), name='selectmeaningdel'),\r\n re_path(r'^(?P[\\w-]+)/deletew$', DeleteWordView.as_view(), name='deleteword'),\r\n re_path(r'^(?P[\\w-]+)/$', DteailView.as_view(), name = 'detailword'),\r\n]\r\n","sub_path":"words/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"455896858","text":"import pandas as pd\r\ntestfiledirectory = r'I:\\Christine Lin\\Transunion'\r\nsampledirectory=r'C:\\Users\\baili.lu\\Documents\\My Received Files'\r\n\r\n#read in zips to be joined to\r\nclaimszips = pd.read_excel(sampledirectory + '\\\\samplezips.xlsx', \r\n converters={'ZIP':str,'ZIPCODE_EXT':str}\r\n )\r\nclaimszips['Zip +4'] = claimszips['ZIP'] + claimszips['ZIPCODE_EXT'].astype(str).str.zfill(4)\r\n\r\n\r\n#read in layout file\r\nlayout = pd.read_excel(testfiledirectory + '\\\\Farmers Q4 2016 Standard_Ins Supp_CV ACD Z4 Mean Roll up layout.xlsx')\r\nlayout['Variable Name'][1197] = 'CURADDR2' #theres 2 columns called CURADDR. 
rename the second one so there's no conflict\r\nlayout1=pd.read_csv(sampledirectory+'\\layout1.csv')\r\n\r\n#read in data file\r\noutput = pd.DataFrame() #start with empty dataframe to keep appending to everytime you match a record\r\nfor c in range(4):\r\n chunk = pd.read_fwf(testfiledirectory + '\\Q4.2016.XA4.' + str(c).zfill(3), \r\n #nrows = 1000, #number rows to read in\r\n widths = layout['Length'].tolist(), #widths of each columns\r\n names = layout['Variable Name'].tolist(), # header names\r\n converters={'Zip +4':str},\r\n chunksize=50000\r\n )\r\n readinfile=pd.concat(chunk, ignore_index=True)\r\n print('loaded file ' + str(c))\r\n \r\n currentmatched = claimszips.merge(readinfile, how = 'inner')\r\n output = pd.concat([output, currentmatched])\r\n \r\n \r\n############################################################## \r\n\r\nfirst_row_list=[]\r\nfor i in range(172):\r\n first_row=pd.read_fwf(testfiledirectory + '\\Q4.2016.XA4.' + str(i).zfill(3), \r\n #nrows = 1000, #number rows to read in\r\n widths = layout['Length'].tolist(), #widths of each columns\r\n names = layout['Variable Name'].tolist(), # header names\r\n converters={'Zip +4':str},\r\n nrows=1\r\n )\r\n first_row_list.append(first_row)\r\nfirst_row_data=pd.concat(first_row_list, ignore_index=True)\r\n \r\n###############################################################\r\n\r\nusecols=['Zip +4','State']\r\nfirst_columns_list=[]\r\nfor i in range(172):\r\n first_columns=pd.read_fwf(testfiledirectory + '\\Q4.2016.XA4.' + str(i).zfill(3), \r\n #nrows = 1000, #number rows to read in\r\n widths = layout['Length'].tolist(), #widths of each columns\r\n names = layout['Variable Name'].tolist(), # header names\r\n converters={'Zip +4':str},\r\n usecols=usecols\r\n )\r\n first_columns_list.append(first_columns)\r\nfirst_col_data=pd.concat(first_columns, ignore_index=True)\r\n \r\n\r\n\r\n###############################\r\n\r\ndf1=pd.read_csv(testfiledirectory + '\\Q4.2016.XA4.000' ,sep='ABCDEFG',encoding='Latin1',header=None)\r\n\r\nh=df1.head(10)\r\n\r\ndef parse(str):\r\n return str[0:2]\r\n\r\n\r\ndef chunkstring(string):\r\n trydict={}\r\n for i in range(1514):\r\n \r\n trydict[layout.loc[i,'Variable Name']]=string[(layout.loc[i,'Start_Position']-1):layout.loc[i,'End_Position']]\r\n \r\n return pd.Series(trydict)\r\n \r\nh1=df1[0].apply(chunkstring)\r\n\r\n\r\n\r\n\r\n#####################################################\r\n\r\ndic={}\r\ndic[0]='tuna'\r\ndic[1]='sandwich'\r\ndic[2]='burger'\r\n\r\n','.join('{}'.format(val) for val in dic.values())\r\n","sub_path":"test matching.py","file_name":"test matching.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"36400810","text":"\nimport streamlit as st \nimport tempfile\nfrom PIL import Image\nimport streamlit.components.v1 as stc\nimport face_recognition\nimport cv2\nimport numpy as np\n\n\nHTML_BANNER = \"\"\"\n
    <!-- (reconstructed) banner markup; the original tags and inline styles were stripped during extraction -->\n    <div>\n    <h1>Face Recognition</h1>\n    </div>
\n\"\"\"\n\nDEMO_VIDEO = 'demo.mp4'\nDEMO_IMAGE = 'test.png'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nstc.html(HTML_BANNER)\n\nst.sidebar.title('Face Recognition')\n\nst.sidebar.text('Params For video')\n\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True)\n\n#IMAGW PART\n\nimg_file_buffer = st.sidebar.file_uploader(\"Upload the Test image\", type=[ \"jpg\", \"jpeg\",'png'])\n\nimfile = tempfile.NamedTemporaryFile(delete=False)\n\nif not img_file_buffer:\n image = np.array(Image.open(DEMO_IMAGE))\n \n img_file_buffer= DEMO_IMAGE\n \nelse:\n \n\n image = np.array(Image.open(img_file_buffer))\n imfile.write(img_file_buffer.read())\n\n \nname_input = st.sidebar.text_input('Name of the Person',value = 'Rose')\nuse_webcam = st.sidebar.button('Use Webcam')\nvideo_file_buffer = st.sidebar.file_uploader(\"Upload a video\", type=[ \"mp4\", \"mov\",'avi','asf', 'm4v' ])\n\ntfflie = tempfile.NamedTemporaryFile(delete=False)\n\nstop_button = st.sidebar.button('Stop Processing')\n\n\nif stop_button:\n st.stop()\n\n\n\nif not video_file_buffer:\n if use_webcam:\n vid = cv2.VideoCapture(0)\n \n else:\n vid = cv2.VideoCapture(DEMO_VIDEO)\n tfflie.name = DEMO_VIDEO\n \n\n \nelse:\n tfflie.write(video_file_buffer.read())\n vid = cv2.VideoCapture(tfflie.name)\n\nst.sidebar.text('Input Video')\nst.sidebar.video(tfflie.name)\n\n#starting with face encoding\nperson1_img = face_recognition.load_image_file(img_file_buffer)\ngiven_face_encoding = face_recognition.face_encodings(person1_img)[0]\n\nknown_face_encodings = [given_face_encoding]\n\nknown_face_name = [name_input]\n\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\nstframe = st.empty()\n\nwhile vid.isOpened():\n ret, frame = vid.read()\n\n if not ret:\n break\n small_frame = cv2.resize(frame,(0, 0), fx=0.25, fy=0.25)\n\n new_frame = small_frame[:,:,::-1]\n\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(new_frame)\n face_encodings = face_recognition.face_encodings(new_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_name[best_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n\n\n stframe.image(frame,channels = 'BGR',use_column_width=True)\n\n \n\n\n\n\n\n\n\n\n\n","sub_path":"face_recog_app.py","file_name":"face_recog_app.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"538195217","text":"from BasicWrapper import BasicWrapper\n\nADB_CMD = 'adb'\n\nclass ADB( object ):\n @staticmethod\n def LogcatDump(apk, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n 
'-s', serial_number,\n 'logcat', '-d',\n '-v', 'long',\n ]\n else:\n args = [ADB_CMD,\n 'logcat', '-d'\n '-v', 'long',\n ]\n\n output = BasicWrapper.run_cmd_longstdout(args)\n \n return output\n\n @staticmethod\n def LogcatClear(apk, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n '-s', serial_number,\n 'logcat', '-c'\n ]\n else:\n args = [ADB_CMD,\n 'logcat', '-c'\n ]\n\n BasicWrapper.run_cmd(args)\n\n return True\n \n @staticmethod\n def Uninstall(apk, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n '-s', serial_number,\n 'uninstall',\n apk['package']['name'],\n ]\n else:\n args = [ADB_CMD,\n 'uninstall',\n apk['package']['name'],\n ]\n\n BasicWrapper.run_cmd(args)\n\n return True\n\n @staticmethod\n def ShellMonkey(apk, rgc=75, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n '-s', serial_number,\n 'shell', 'monkey',\n '-p', \"'%s'\" % apk['package']['name'], str(rgc),\n ]\n else:\n args = [ADB_CMD,\n 'shell', 'monkey',\n '-p', \"'%s'\", apk['package']['name'], str(rgc),\n ]\n\n BasicWrapper.run_cmd_longstdout(args)\n\n return True\n \n @staticmethod\n def ShellAmStart(apk, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n '-s', serial_number,\n 'shell', 'am', 'start',\n '-n', '%s/%s' % (apk['package']['name'], apk['launchable-activity']['name']),\n ]\n else:\n args = [ADB_CMD,\n 'shell', 'am', 'start',\n '-n', '%s/%s' % (apk['package']['name'], apk['launchable-activity']['name']),\n ]\n\n BasicWrapper.run_cmd(args)\n\n return True\n\n @staticmethod\n def Install(apk, serial_number=''):\n if serial_number:\n args = [ADB_CMD,\n '-s', serial_number,\n 'install', apk['path'],\n ]\n else:\n args = [ADB_CMD,\n 'install', apk['path'],\n ]\n\n BasicWrapper.run_cmd(args)\n\n return True\n \n @staticmethod\n def Devices():\n args = [ADB_CMD, 'devices']\n\n stdout = BasicWrapper.run_cmd(args)\n\n return stdout.readlines()\n \n @staticmethod\n def KillServer():\n args = [ADB_CMD, 'kill-server']\n\n BasicWrapper.run_cmd(args)\n\n return True\n \n @staticmethod\n def StartServer():\n args = [ADB_CMD, 'start-server']\n\n BasicWrapper.run_cmd(args)\n\n return True\n\n @staticmethod\n def Connect(port='5555'):\n args = [ADB_CMD,\n 'connect',\n '127.0.0.1:%s' % port,\n ]\n\n stdout = BasicWrapper.run_cmd(args)\n \n return stdout.readlines()\n","sub_path":"farat/AdbWrapper.py","file_name":"AdbWrapper.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"60983166","text":"# Faça um programa que leia tres numeros e mostre na tela qual é o maio e qual é o menor\n\nnum1 = int(input('Informe o primeiro numero inteiro: '))\nnum2 = int(input('Informe o segundo numero inteiro: '))\nnum3 = int(input('Informe o terceiro numero inteiro: '))\n\n# Verificando o menor\nmenor = num1\nif num2 < num1 and num2 < num3:\n menor = num2\nif num3 < num1 and num3 < num2:\n menor = num3\n\n# Verificando o maior\nmaior = num1\nif num2 > num1 and num2 > num3:\n maior = num2\nif num3 > num1 and num3 > num2:\n maior = num3\n\nprint('O maior é o numero {}'.format(maior))\nprint('O menor é o numero {}'.format(menor))\n","sub_path":"ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"624753415","text":"from django.shortcuts import render\nimport numpy as np\nfrom . 
import lab_3_gks as lb\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'gks/index.html', {})\n\n\ndef table(request):\n if request.method == \"GET\":\n if 'amount' in request.GET:\n context = {\n 'amount': range(int(request.GET['amount'])),\n 'column': range(3)\n }\n return render(request, 'gks/table.html', context)\n return render(request, 'gks/table.html', {})\n\n\ndef result(request):\n choice_criteria = [int(i) for i in request.GET.getlist('criteria')]\n all_criteria = [1.1, 1.2, 1.3, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7,\\\n 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7]\n table_caption = ['']\n for i in range(len(all_criteria)):\n if i in choice_criteria:\n table_caption.append(all_criteria[i])\n table_caption.append('R')\n time_prepare = request.GET.getlist('matrix')\n length = len(time_prepare)\n time_prepare = np.asarray(time_prepare, dtype=\"float64\").reshape(int(length/3), -1).tolist()\n matrix = {}\n for i in range(len(time_prepare)):\n matrix[i+1] = time_prepare[i]\n johnson_plan = lb.johnson(matrix)\n johnson_diagram = lb.create_diagram(matrix, lb.johnson(matrix))\n del johnson_diagram[0]\n\n all_standard = []\n for i in lb.full_bout(matrix):\n all_standard.append(lb.standard(lb.create_diagram(matrix, i), choice_criteria))\n\n ans, all_compromise = lb.create_r(matrix, choice_criteria)\n standard_plan = lb.full_bout(matrix)[ans]\n standard_diagram = lb.create_diagram(matrix, standard_plan)\n del standard_diagram[0]\n\n for i in range(len(all_standard)):\n all_standard[i].append(round(all_compromise[i], 3))\n all_standard[i].insert(0, lb.full_bout(matrix)[i])\n # set of colors\n color_rgb = [(120, 0, 0), (0, 120, 0), (0, 0, 120), (120, 120, 0), (120, 0, 120), (120, 120, 120)]\n\n johnson_dict = {}\n for i in range(len(johnson_plan)):\n johnson_dict[johnson_plan[i]] = johnson_diagram[i]\n\n standard_dict = {}\n for i in range(len(standard_plan)):\n standard_dict[standard_plan[i]] = standard_diagram[i]\n\n context = {\n 'matrix': matrix,\n 'johnson_plan': johnson_plan,\n 'johnson_diagram': johnson_diagram,\n 'criteria': all_standard,\n 'criteria_diagram': standard_diagram,\n 'criteria_plan': standard_plan,\n 'length': range(36),\n 'choice_criteria': choice_criteria,\n 'all_compromise': all_compromise,\n 'name': lb.full_bout(matrix),\n 'table_caption': table_caption,\n 'color_rgb': color_rgb,\n 'johnson_dict': johnson_dict,\n 'criteria_dict': standard_dict\n }\n return render(request, 'gks/result.html', context)\n","sub_path":"gks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"77042319","text":"from vk_api.longpoll import VkLongPoll, VkEventType\nfrom vk_api.keyboard import VkKeyboard, VkKeyboardColor\nimport requests\nimport vk_api\nimport random\nimport wikipedia\n\ntoken ='ae75ea73cf3c85e5476823346af0c68d9dccdc64183a3152882eb129f2c6078bc0e783751194172464381'\nvk_session = vk_api.VkApi(token=token)\nvk = vk_session.get_api()\nlongpoll = VkLongPoll(vk_session)\nwikipedia.set_lang(\"RU\")\n\nif event.text == 'Википедия' or event.text == 'Вики' or event.text == 'википедия' or event.text == 'вики' or event.text == 'Wikipedia' or event.text == 'wikipedia' or event.text == 'Wiki' or event.text == 'wiki': \n if event.from_user: \n vk.messages.send(\n user_id=event.user_id,\n message='Введите запрос' \n\t)\n elif event.from_chat: \n vk.messages.send(\n chat_id=event.chat_id,\n message='Введите запрос' \n\t)\n for event in 
longpoll.listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text: \n if event.from_user:\n vk.messages.send( \n user_id=event.user_id,\n message='Вот что я нашёл: \\n' + str(wikipedia.summary(event.text)) )\n break\n elif event.from_chat: \n vk.messages.send(\n chat_id=event.chat_id,\n message='Вот что я нашёл: \\n' + str(wikipedia.summary(event.text)))\n break \n continue\n\n\n","sub_path":"vkbot.py","file_name":"vkbot.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"297858065","text":"from django.conf.urls import url\nfrom . import views\nfrom feeds import LatestChanges\n\n#feeds = {\n# 'mods': LatestChanges,\n#}\n\nurlpatterns = [\n url(r'^$', views.BlockStatusView.as_view(), name='blockstatus'),\n #url(r'^rss/(?P.*)/$', 'django.contrib.syndication.views.Feed', {'feed_dict': feeds}),\n url(r'^rss/mods/$', LatestChanges()),\n #url(r'^choices/$', views.room_choices, name='choices'),\n url(r'^choices/$', views.choices, name='choices'),\n url(r'^result/$', views.result, name='result'),\n url(r'^add_block/$', views.add_block, name='add_block'),\n url(r'^all_rooms/$', views.AllRoomsView.as_view(), name='roomsdetails'),\n url(r'^update_rooms/$', views.manage_rooms, name='updateall'),\n url(r'^(?P[-\\w]+)/$', views.FlatDetailView.as_view(), name='flatobjectdetail'),\n url(r'^edit/(?P\\d+)/$', views.UpdateRoomView.as_view(), name='rooms-edit',),\n url(r'^$', views.ListRoomView.as_view(), name='rooms-list'),\n\n ]\n\n","sub_path":"bluemonkey/prostats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"635473274","text":"class Node:\n def __init__(self,data):\n self.next = None\n self.data = data\n\nclass LinkedList():\n def __init__(self):\n self.head = None\n \n def display(self):\n cur_node = self.head\n while cur_node != None:\n print(cur_node.data)\n cur_node = cur_node.next\n \n def add_end(self,data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n else:\n traversing = self.head\n while traversing.next is not None:\n traversing = traversing.next\n traversing.next = new_node\n new_node = None\n\n def add_beg(self,data):\n new_node = Node(data)\n\n new_node.next = self.head\n self.head = new_node\n\n def add_after(self,data_after,data):\n new_node = Node(data)\n found = False\n cur_pos = self.head\n while cur_pos is not None:\n if cur_pos.data == data_after:\n found = True\n break\n cur_pos = cur_pos.next\n if found:\n new_node.next = cur_pos.next\n cur_pos.next = new_node\n else:\n print(\"Data not found\")\n\n def delete_node(self,data):\n cur_node = self.head\n if cur_node.data == data:\n self.head = self.head.next\n cur_node.next = None\n else:\n prev_node = self.head\n cur_node = prev_node.next\n while cur_node is not None:\n if cur_node.data == data:\n prev_node.next = cur_node.next\n cur_node.next = None\n prev_node = prev_node.next\n cur_node = cur_node.next\n\n\n\nl = LinkedList()\nl.add_end(1)\nl.add_end(2)\nl.add_beg(3)\nl.add_after(2,4)\nl.display()\nl.delete_node(3)\nprint()\nl.display()","sub_path":"SinglyLinkedList.py","file_name":"SinglyLinkedList.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"364137526","text":"\nimport sys\nimport numpy as np\nfrom math import sqrt\nimport argparse\nimport 
logging\nimport shutil\nimport MDAnalysis\n\nfrom IPython import embed\n\nfrom scipy.spatial import cKDTree\nimport itertools\n#from skimage import measure\n\nfrom rhoutils import rho, cartesian\nfrom mdtools import ParallelTool\n\nfrom constants import SEL_SPEC_HEAVIES, SEL_SPEC_HEAVIES_NOWALL\nfrom mdtools.fieldwriter import RhoField\nimport sys\nimport argparse, os\nfrom scipy.interpolate import interp2d\n\nimport matplotlib as mpl\n\nfrom skimage import measure\nfrom scipy.optimize import curve_fit\nmyorange = myorange = plt.rcParams['axes.prop_cycle'].by_key()['color'][1] \n# Fits a circle, centered at (r,x) = (0,a), radius b, to: x = a + sqrt(b**2 - r**2)\nfn_fit = lambda rvals, a, b: a + np.sqrt(b**2 - rvals**2)\n\n## Parameters for circle fitting\n# In (a, b); a is x pt of circle center, b is circle radius\nparam_lb = np.array([-np.inf, 0])\nparam_ub = np.array([0, np.inf])\nbounds = (param_lb, param_ub)\np0 = np.array([-1, 2])\n\n\n# Find xs, point where line between (xlo, ylo) and (xhi, yhi) crosses ys\ndef interp1d(xlo, xhi, ylo, yhi, ys=0.5):\n\n m = (yhi - ylo) / (xhi - xlo)\n\n if m == 0:\n return xlo\n\n return xlo + (ys-ylo)/m\n\n\n# Given a density field (shape: (xvals.size, yvals.size, zvals.size)), find \n# linearly interpolated points where we cross isovalue\ndef get_interp_points(rho, xvals, yvals, zvals, iso=0.5):\n\n\n rho_mask = (rho > iso).astype(int)\n xcross = np.diff(rho_mask, axis=0).astype(bool)\n ycross = np.diff(rho_mask, axis=1).astype(bool)\n\n dx, dy = np.gradient(rho_mask)\n\n pts = []\n for ix in range(xvals.size-1):\n xlo = xvals[ix]\n xhi = xvals[ix+1]\n\n for iy in range(yvals.size-1):\n ylo = yvals[iy]\n yhi = yvals[iy+1]\n\n\n bxcross = xcross[ix, iy]\n bycross = ycross[ix, iy]\n\n if not (bxcross or bycross):\n continue\n\n\n ptx = interp1d(xlo, xhi, rho[ix, iy], rho[ix+1, iy], ys=iso) if bxcross else xlo\n pty = interp1d(ylo, yhi, rho[ix, iy], rho[ix, iy+1], ys=iso) if bycross else ylo\n\n pts.append(np.array([ptx, pty]))\n\n # last col of x\n for iy in range(yvals.size-1):\n ylo = yvals[iy]\n yhi = yvals[iy+1]\n\n if not ycross[-1, iy]:\n continue\n\n pty = interp1d(ylo, yhi, rho[-1, iy], rho[-1, iy+1], ys=iso)\n\n pts.append(np.array([xvals[-1], pty]))\n\n # last col of y\n # last col of x\n for ix in range(xvals.size-1):\n xlo = xvals[ix]\n xhi = xvals[ix+1]\n\n if not xcross[ix, -1]:\n continue\n\n ptx = interp1d(xlo, xhi, rho[ix, -1], rho[ix+1, -1], ys=iso)\n\n pts.append(np.array([ptx, yvals[-1]]))\n\n return np.array(pts)\n\n\nds = np.load('rhoxyz.dat.npz')\nxbins = ds['xbins']\nybins = ds['ybins']\nzbins = ds['zbins']\n\nxvals = xbins[:-1] + 0.5*np.diff(xbins)\nyvals = ybins[:-1] + 0.5*np.diff(ybins)\nzvals = zbins[:-1] + 0.5*np.diff(zbins)\n\nrho = ds['rho'].mean(axis=0)\n\nxx, yy = np.meshgrid(xbins, ybins, indexing='ij')\ndx = np.diff(xvals)[0]\ndy = np.diff(yvals)[0]\ndz = np.diff(zvals)[0]\n\n\navg_rho = rho / (0.033*dx*dy*dz)\navg_rho = np.clip(avg_rho, 0, 1)\nmask_rho = (avg_rho > 0.5).astype(int)\n\n\ntest1 = np.array([[1,0,0],\n [1,0,0]])[::-1,:].T\ntest2 = np.array([[1,1,0],\n [1,0,0]])[::-1,:].T\n\nxbins = np.arange(4)\nybins = np.arange(3)\nzbins = np.arange(2)\n\nxvals = xbins[:-1] #+ 0.5*np.diff(xbins)\nyvals = ybins[:-1] #+ 0.5*np.diff(ybins)\nzvals = zbins[:-1] #+ 0.5*np.diff(zbins)\n\nxx, yy = np.meshgrid(xbins, ybins, 
indexing='ij')\n\n","sub_path":"scratch/cyl_sam/old/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"596360681","text":"from django import template\n\nregister = template.Library()\n\n@register.filter(name='week')\ndef week(value):\n return {\n 0: 'Lundi',\n 1: 'Mardi',\n 2: 'Mercredi',\n 3: 'Jeudi',\n 4: 'Vendredi',\n 5: 'Samedi',\n 6: 'Dimanche',\n }[value]\n","sub_path":"agenda/templatetags/extra_filters.py","file_name":"extra_filters.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"466925575","text":"#!/usr/bin/env python\n\"\"\"\nCommand Parser module\n---------------------\nImplements the ``insights`` command line. Each function is the first\nargument followed by the function specific arguments. See USAGE text\nbelow.\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nUSAGE = \"\"\"insights []\nAvailable commands:\n cat Execute a spec and show the output\n collect Collect all specs against the client and create an Insights archive.\n inspect Execute component and shell out to ipython for evaluation.\n info View info and docs for Insights Core components.\n run Run insights-core against host or an archive.\n\"\"\"\n\n\nclass InsightsCli(object):\n \"\"\"\n Class to implement the cli module.\n Each command is called as a method of this class and all\n arg parsing is performed in the separate module that\n actually implements the command. the args \"insights command\"\n are not passed to the submodule.\n \"\"\"\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n sys.exit(1)\n # remove the sub-command arg from sys.argv\n sys.argv.pop(1)\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n\n def collect(self):\n from .collect import main as collect_main\n collect_main()\n\n def info(self):\n from .tools.query import main as query_main\n query_main()\n\n def inspect(self):\n from .tools.insights_inspect import main as inspect_main\n inspect_main()\n\n def run(self):\n from insights import run\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\ndef main():\n InsightsCli()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"insights/command_parser.py","file_name":"command_parser.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"557638219","text":"import wx\r\n \r\n#frame\r\nclass window (wx.Frame):\r\n def __init__(self):\r\n wx.Frame.__init__(self,None,-1,'Calculadora Básica en Python, Programacion Avanzada',size=(420,420))\r\n \r\n #presentacion\r\n a=wx.MessageDialog(None,'hola mundo! 
\\n Soy una Calculadora \\n mi creador es Ali Perez Gomez','Ingenieria Electronica',style=wx.OK)\r\n b=a.ShowModal()\r\n \r\n #barra de menu\r\n status=self.CreateStatusBar()\r\n menu=wx.MenuBar()\r\n creditos=wx.Menu()\r\n contactos=wx.Menu()\r\n salir=wx.Menu()\r\n \r\n creditos.Append(wx.ID_ABOUT,'Creditos', 'Agradecimientos por Colaboracion')\r\n wx.EVT_MENU(self,wx.ID_ABOUT, self.creditos)\r\n \r\n contactos.Append(wx.ID_ADD, 'Contactar a Ali Perez Gomez', '¿Nesecitas Ayuda?')\r\n wx.EVT_MENU(self,wx.ID_ADD, self.contactar)\r\n \r\n contactos.Append(wx.ID_APPLY, 'Paginas con contenido acerca de Python', 'Ali Perez Gomez')\r\n wx.EVT_MENU(self,wx.ID_APPLY, self.paginas)\r\n \r\n salir.Append(wx.ID_EXIT,\"Salir\", \"Vuelve pronto al sistema de Electronica\")\r\n wx.EVT_MENU(self,wx.ID_EXIT, self.salir)\r\n \r\n menu.Append(creditos,'Creditos')\r\n menu.Append(contactos, 'Contactos')\r\n menu.Append(salir, 'Salir')\r\n \r\n self.SetMenuBar(menu)\r\n \r\n #botones\r\n suma = wx.Button(self, label = '+', pos = (100 - 60 - 15, 230), size = (60,25))\r\n suma.Bind(wx.EVT_BUTTON,self.suma)\r\n \r\n resta = wx.Button(self, label = '-', pos = (200 - 60 - 15, 230), size = (60,25))\r\n resta.Bind(wx.EVT_BUTTON,self.resta)\r\n \r\n multiplica = wx.Button(self, label = '*', pos = (300 - 60 - 15, 230), size = (60,25))\r\n multiplica.Bind(wx.EVT_BUTTON,self.multiplica)\r\n \r\n divide = wx.Button(self, label = '/', pos = (400 - 60 - 15, 230), size = (60,25))\r\n divide.Bind(wx.EVT_BUTTON,self.divide)\r\n \r\n limpia= wx.Button(self, label='Limpiar', pos=(250 - 60 - 15, 280),size=(60,25))\r\n limpia.Bind(wx.EVT_BUTTON,self.erradicador)\r\n \r\n #text box\r\n self.valor1 = wx.TextCtrl(self, pos = (10, 30), size = (400 - 120 - 15 - 10, 25), style=wx.TE_PROCESS_ENTER,value='Ingrese el Primer Valor')\r\n self.valor2 = wx.TextCtrl(self, pos = (10, 100), size = (400 - 120 - 15 - 10, 25), style=wx.TE_PROCESS_ENTER,value='Ingrese el Segundo Valor')\r\n self.resultado = wx.TextCtrl(self, pos = (10, 170), size = (400 - 120 - 15 - 10, 25),style=wx.TE_PROCESS_ENTER)\r\n \r\n #labels\r\n label1 = wx.StaticText(self,label='Valor 1', size = (400 - 120 - 15 - 10, 25),pos = (10, 8))\r\n label2 = wx.StaticText(self,label='Valor 2',size = (400 - 120 - 15 - 10, 25),pos = (10, 78))\r\n label3 = wx.StaticText(self,label='Resultado',size = (400 - 120 - 15 - 10, 25),pos = (10, 148))\r\n label4 = wx.StaticText(self,label='Electronica Avanzada',size = (300 - 120 - 15 - 10, 25),pos = (10, 298))\r\n \r\n \r\n \r\n self.Show(True)\r\n \r\n #Eventos\r\n \r\n \r\n def creditos(self,event):#creditos\r\n salir=wx.MessageDialog(None, 'desarrollado por Ali Perez Gomez \\n Colaborador 1: \\n Colaborador 2:', 'Creditos', style=wx.OK)\r\n salir.ShowModal()\r\n \r\n \r\n def salir(self,event):#Salir\r\n salir=wx.MessageDialog(None, 'Saludos :,(','Salir', style=wx.OK)\r\n salir.ShowModal()\r\n self.Close(True)\r\n \r\n def contactar(self,event):#contactar a Ali Perez Gomez\r\n salir=wx.MessageDialog(None, 'aperezg@itesco.edumx \\n ,Contactar a Ali Perez Gomez' , style=wx.OK)\r\n salir.ShowModal()\r\n \r\n \r\n def paginas(self,event):#paginas python\r\n salir=wx.MessageDialog(None, 'www.itesco.edu.mx \\n Python.org \\n wxpython.org','Paginas de Python', style=wx.OK)\r\n salir.ShowModal()\r\n \r\n \r\n def suma(self,event):#Suma\r\n self.resultado.SetLabel(str(int (self.valor1.GetValue())+ int (self.valor2.GetValue())))\r\n resultado=wx.MessageDialog(None, 'su resultado es '+str(int (self.valor1.GetValue())+ int 
(self.valor2.GetValue())),'Resultado',style=wx.OK)\r\n resultado.ShowModal()\r\n \r\n \r\n def resta(self,event):#Resta\r\n self.resultado.SetLabel(str(int (self.valor1.GetValue())- int (self.valor2.GetValue())))\r\n resultado=wx.MessageDialog(None, 'su resultado es '+str(int (self.valor1.GetValue())- int (self.valor2.GetValue())),'Resultado',style=wx.OK)\r\n resultado.ShowModal()\r\n \r\n def multiplica(self,event):#Multiplica\r\n self.resultado.SetLabel(str(int (self.valor1.GetValue())* int (self.valor2.GetValue())))\r\n resultado=wx.MessageDialog(None, 'su resultado es '+ str(int (self.valor1.GetValue())* int (self.valor2.GetValue())),'Resultado',style=wx.OK)\r\n resultado.ShowModal()\r\n \r\n def divide(self,event):#Divide\r\n self.resultado.SetLabel(str(int (self.valor1.GetValue())/ int (self.valor2.GetValue())))\r\n resultado=wx.MessageDialog(None, 'su resultado es '+str(int (self.valor1.GetValue())/ int (self.valor2.GetValue())),'Resultado',style=wx.OK)\r\n resultado.ShowModal()\r\n \r\n def erradicador (self,event):#el limpiador XD\r\n self.valor1.SetLabel('Por Favor Ingrese el Primer Valor')\r\n self.valor2.SetLabel('Por Favor Ingrese el Segundo Valor')\r\n self.resultado.SetLabel('')\r\n erradicador=wx.MessageDialog(None,'Sector Clear \\nReady to Continue', 'Erradicador',wx.OK)\r\n erradicador.ShowModal()\r\n \r\napp = wx.App()\r\na=window()\r\napp.MainLoop()","sub_path":"calculadora/Practica_Laboratorio_2.py","file_name":"Practica_Laboratorio_2.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"559148686","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog,QMessageBox\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nfrom PyQt5.QtWidgets import QGridLayout\n\nfrom Ui_stock_pool_track import Ui_MainWindow\n\n#创建一个matplotlib图形绘制类\nclass MyFigure(FigureCanvas):\n def __init__(self,width=5, height=4, dpi=100):\n #第一步:创建一个创建Figure\n self.fig = Figure(figsize=(width, height), dpi=dpi)\n #第二步:在父类中激活Figure窗口\n super(MyFigure,self).__init__(self.fig) #此句必不可少,否则不能显示图形\n #第三步:创建一个子图,用于绘制图形用,111表示子图编号,如matlab的subplot(1,1,1)\n self.axes = self.fig.add_subplot(111)\n #第四步:就是画图,【可以在此类中画,也可以在其它类中画】\n def plotsin(self):\n self.axes0 = self.fig.add_subplot(111)\n t = np.arange(0.0, 3.0, 0.01)\n s = np.sin(2 * np.pi * t)\n self.axes0.plot(t, s)\n def plotcos(self):\n t = np.arange(0.0, 3.0, 0.01)\n s = np.sin(2 * np.pi * t)\n self.axes.plot(t, s)\n\n\nclass MainDialogImgBW(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(MainDialogImgBW,self).__init__()\n self.setupUi(self)\n self.setWindowTitle(\"显示matplotlib绘制图形\")\n self.setMinimumSize(0,0)\n\n #第五步:定义MyFigure类的一个实例\n self.F = MyFigure(width=3, height=2, dpi=100)\n #self.F.plotsin()\n self.plotcos()\n #第六步:在GUI的groupBox中创建一个布局,用于添加MyFigure类的实例(即图形)后其他部件。\n self.gridlayout = QGridLayout(self.widget) # 继承容器groupBox\n self.gridlayout.addWidget(self.F,0,0)\n\n #补充:另创建一个实例绘图并显示\n self.plotother()\n\n def plotcos(self):\n t = np.arange(0.0, 5.0, 0.01)\n s = np.cos(2 * np.pi * t)\n self.F.axes.plot(t, s)\n self.F.fig.suptitle(\"cos\")\n\n def plotother(self):\n F1 = MyFigure(width=5, height=4, dpi=100)\n F1.fig.suptitle(\"Figuer_4\")\n F1.axes1 = F1.fig.add_subplot(221)\n x = np.arange(0, 50)\n y = np.random.rand(50)\n F1.axes1.hist(y, bins=50)\n 
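# (added note) the plot() and bar() calls below reuse axes1, so subplot 221\n        # layers a histogram, a line plot and a bar chart in a single panel.\n        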
F1.axes1.plot(x, y)\n F1.axes1.bar(x, y)\n F1.axes1.set_title(\"hist\")\n F1.axes2 = F1.fig.add_subplot(222)\n\n ## 调用figure下面的add_subplot方法,类似于matplotlib.pyplot下面的subplot方法\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = [23, 21, 32, 13, 3, 132, 13, 3, 1]\n F1.axes2.plot(x, y)\n F1.axes2.set_title(\"line\")\n # 散点图\n F1.axes3 = F1.fig.add_subplot(223)\n F1.axes3.scatter(np.random.rand(20), np.random.rand(20))\n F1.axes3.set_title(\"scatter\")\n # 折线图\n F1.axes4 = F1.fig.add_subplot(224)\n x = np.arange(0, 5, 0.1)\n F1.axes4.plot(x, np.sin(x), x, np.cos(x))\n F1.axes4.set_title(\"sincos\")\n self.gridlayout.addWidget(F1, 0, 1)\n \n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n main = MainDialogImgBW()\n main.show()\n #app.installEventFilter(main)\n sys.exit(app.exec_())","sub_path":"trade_stock_digu/stock_pool_track/main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"606324383","text":"\n\ndef dbfToCSV(in_dbf, out_csv):\n \"\"\"\n dbfToCSV(in_dbf, out_csv)\n \n Convert input .dbf file into .csv file located in out_csv\n \"\"\"\n \n import csv\n from dbfpy import dbf\n import sys\n\n dbf_fn = in_dbf\n csv_fn = out_csv\n\n in_db = dbf.Dbf(dbf_fn)\n out_csv = csv.writer(open(csv_fn, 'wb'))\n\n names = []\n for field in in_db.header.fields:\n names.append(field.name)\n out_csv.writerow(names)\n\n for rec in in_db:\n out_csv.writerow(rec.fieldData)\n\n in_db.close()\n","sub_path":"utilities/dbfToCSV.py","file_name":"dbfToCSV.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"317605007","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Time : 2019/7/4 10:50\r\n@Author : geqh\r\n@file : tag_EN.py\r\n\"\"\"\r\nimport nltk\r\nfrom utils.log import *\r\nfrom utils.confPaser import *\r\nimport os\r\n\r\nnltk.download(\"punkt\")\r\nnltk.download(\"averaged_perceptron_tagger\")\r\n\r\n\r\ndef load_dict():\r\n dic_path = os.path.join(project_path, \"conf/dic_en.txt\")\r\n map = {}\r\n with open(dic_path, \"r\", encoding=\"utf8\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n one = line.split(\"-\")\r\n items = one[1].split(\";\")\r\n for item in items:\r\n map[item.strip(\"\\n\")] = one[0]\r\n logger.info(\"load_dict:\"+dic_path)\r\n return map\r\n\r\n\r\ndef tagEN(text, dict_map):\r\n english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']',\r\n '&', '!', '*', '@', '#', '$', '%', '<', '>', '\\'',\r\n '\\\"', '\\\\', '/']\r\n words = nltk.word_tokenize(text)\r\n pos_tags = nltk.pos_tag(words)\r\n tags = []\r\n for pos_tag in pos_tags:\r\n tag = str(pos_tag[1]).upper().strip()\r\n word = pos_tag[0].strip()\r\n if dict_map.get(tag, 0):\r\n tags.append({\"word\": word, \"tag\": tag, \"pos\": dict_map[tag]})\r\n elif word in english_punctuations:\r\n tags.append({\"word\": word, \"tag\": tag, \"pos\": \"标点\"})\r\n else:\r\n tags.append({\"word\": word, \"tag\": tag, \"pos\": \"X\"})\r\n\r\n logger.info(tags)\r\n return tags\r\n","sub_path":"tagEN/tag_EN.py","file_name":"tag_EN.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"57769106","text":"from . import halconfig_types as types\nfrom . 
import halconfig_dependency as dep\n\nname = \"BUTTON\"\ndisplayname = \"Button\"\ncompatibility = dep.Dependency() # = all\nenable = {\n \"define\": \"BSP_BUTTON_PRESENT\",\n \"description\": \"Buttons present on board\",\n}\noptions = {\n \"BSP_BUTTON\": {\n \"type\": types.PinArray(\n \"BSP_BUTTON\",\n min=0,\n max=8,\n default=2,\n item_description=\"Button %n\"\n ),\n \"description\": \"Number of buttons available on board\",\n \"allowedconflicts\": [\"BSP_LED\", \"BSP_BTL_BUTTON\"],\n \"longdescription\": \"Number of buttons physically present in hardware [0-8]\"\n },\n \"BSP_BUTTON_GPIO_DOUT\": {\n \"type\": \"enum\",\n \"description\": \"DOUT register value of button pins\",\n \"values\": [\n types.EnumValue(\"HAL_GPIO_DOUT_LOW\", \"Low\"),\n types.EnumValue(\"HAL_GPIO_DOUT_HIGH\", \"High\")\n ],\n \"longdescription\": \"Set to high/low to enable pullup or pulldown respectively when in input mode with pull. Set to high to enable filter in input only mode.\",\n },\n \"BSP_BUTTON_GPIO_MODE\": {\n \"type\": \"enum\",\n \"description\": \"MODE register value of button pins\",\n \"values\": [\n types.EnumValue(\"HAL_GPIO_MODE_INPUT\", \"Input\"),\n types.EnumValue(\"HAL_GPIO_MODE_INPUT_PULL\", \"Input with pullup/down\"),\n types.EnumValue(\"HAL_GPIO_MODE_INPUT_PULL_FILTER\", \"Input with pullup/down and filter\")\n ],\n \"longdescription\": \"Set the GPIO mode for the pins used for buttons.\",\n },\n \"HAL_BUTTON_COUNT\": {\n \"type\": \"uint8_t\",\n \"description\": \"Number of buttons to enable\",\n \"min\": \"0\",\n \"max\": \"255\",\n \"advanced\": True,\n \"longdescription\": \"Cannot exceed number of buttons configured as available above.\",\n },\n \"HAL_BUTTON_ENABLE\": {\n \"type\": \"array\",\n \"description\": \"List of button indices to enable\",\n \"defaultValue\": \"0, 1\",\n \"advanced\": True,\n \"longdescription\": \"Comma separated list of the buttons that should be enabled for this application. Example: Board has 4 buttons, but application only uses button 0 and 2. Configure the list of buttons to enable as \\\"0, 2\\\".\",\n },\n}\n","sub_path":"platform/hwconf_data/efm32zg/modules/BUTTON/BUTTON_model.py","file_name":"BUTTON_model.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"188912736","text":"import requests\nimport csv\n\ndef main():\n url = 'http://stats.nba.com/stats/leaguedashteamstats?' 
+ \\\n 'Conference=&' + \\\n 'DateFrom=&' + \\\n 'DateTo=&' + \\\n 'Division=&' + \\\n 'GameScope=&' + \\\n 'GameSegment=&' + \\\n 'LastNGames=0&' + \\\n 'LeagueID=00&' + \\\n 'Location=&' + \\\n 'MeasureType=Base&' + \\\n 'Month=0&' + \\\n 'OpponentTeamID=0&' + \\\n 'Outcome=&' + \\\n 'PORound=0&' + \\\n 'PaceAdjust=N&' + \\\n 'PerMode=PerGame&' + \\\n 'Period=0&' + \\\n 'PlayerExperience=&' + \\\n 'PlayerPosition=&' + \\\n 'PlusMinus=N&' + \\\n 'Rank=N&' + \\\n 'Season=2014-15&' + \\\n 'SeasonSegment=&' + \\\n 'SeasonType=Regular+Season&' + \\\n 'ShotClockRange=&' + \\\n 'StarterBench=&' + \\\n 'TeamID=0&' + \\\n 'VsConference=&' + \\\n 'VsDivision='\n\n response = requests.get(url)\n\n # Parse API Response\n\n results = response.json()['resultSets']\n headers = results[0]['headers']\n stats = results[0]['rowSet'] \n stats.insert(0, headers)\n\n # Write to csv\n\n filename = 'stats.csv'\n with open(filename, 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(stats)\n\n # Print API request parameters\n\n print(response.json()['parameters'])\n\nif __name__ == '__main__':\n main()\n","sub_path":"nba/scripts/nba_api.py","file_name":"nba_api.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"122755793","text":"# ----------------------------\n# gal for python (galpy)\n# 2019.10.05 Sat. by A.Arimura\n# 2020.1.1 Wed. by A.Arimura\n# ----------------------------\nimport random\nimport yaml\n#import re\n# import MeCab\n\n# mecab = MeCab.Tagger('/usr/local/lib/mecab/dic/mecab-ipadic-neologd')\n\nclass GalPy:\n def __init__(self, name): \n with open('./vocab/config_02.yaml', mode='r') as f:\n self.conf = yaml.load(f, Loader=yaml.SafeLoader)\n # ギャルの名前のリスト\n self.name_list = self.conf['name']\n # 返答のリスト\n self.responses = self.conf['answer_words']\n # 出会いの挨拶リスト\n self.open_greets = self.conf['open_greets']\n # お別れの挨拶のリスト\n self.end_greets = self.conf['end_greets']\n # 名前決め\n self.name = self.get_name(name, self.name_list)\n\n def get_name(self, name, name_list):\n s = ''\n if name == 'rand':\n s = random.choice(name_list)\n return s\n else:\n s = name\n return s\n\n def kotaeru(self, word):\n keys = ('ガンバ', '頑張', 'がんば')\n ans = ''\n judge = lambda x: x in word\n if any(map(judge, keys)):\n ans = 'すごみ is ある'\n return ans\n else:\n ans = random.choice(self.responses)\n return ans\n","sub_path":"d_galpy.py","file_name":"d_galpy.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"430229021","text":"from __future__ import division\n\nimport math\nimport torch\nfrom torch.autograd import Variable\n\nimport onmt\n\n\nclass Dataset(object):\n def __init__(self, srcData, tgtData, batchSize, cuda, context_size, volatile=False):\n self.src = srcData\n if tgtData:\n self.tgt = tgtData\n assert(len(self.src) == len(self.tgt))\n else:\n self.tgt = None\n self.cuda = cuda\n\n self.batchSize = batchSize\n self.numBatches = math.ceil(len(self.src) / batchSize)\n self.volatile = volatile\n self.context_size = context_size\n\n def _batchify(self, data, align_right=False):\n lengths = [x.size(0) for x in data]\n max_length = max(lengths)\n out = data[0].new(len(data), max_length).fill_(onmt.Constants.PAD)\n for i in range(len(data)):\n data_length = data[i].size(0)\n offset = max_length - data_length if align_right else 0\n out[i].narrow(0, offset, data_length).copy_(data[i])\n\n return out\n\n def _batchify_context(self, 
data, align_right=False):\n batch_size = len(data)\n context_size = len(data[0])\n lengths = [[sen.size(0) for sen in context] for context in data]\n max_length = max([max(context_lengths) for context_lengths in lengths])\n out = data[0][0].new(context_size, batch_size,\n max_length).fill_(onmt.Constants.PAD)\n for i in range(len(data)):\n for ci in range(len(data[i])):\n data_length = data[i][ci].size(0)\n offset = max_length - data_length if align_right else 0\n out[ci][i].narrow(0, offset, data_length).copy_(data[i][ci])\n\n return out\n\n def __getitem__(self, index):\n assert index < self.numBatches, \"%d > %d\" % (index, self.numBatches)\n if self.context_size > 1:\n srcBatch = self._batchify_context(\n self.src[index * self.batchSize:(index + 1) * self.batchSize], align_right=True)\n else:\n srcBatch = self._batchify(\n self.src[index * self.batchSize:(index + 1) * self.batchSize], align_right=True)\n if self.tgt:\n tgtBatch = self._batchify(\n self.tgt[index * self.batchSize:(index + 1) * self.batchSize])\n else:\n tgtBatch = None\n\n def wrap(b):\n if b is None:\n return b\n b = b.transpose(b.dim() - 2, b.dim() - 1).contiguous()\n if self.cuda:\n b = b.cuda()\n b = Variable(b, volatile=self.volatile)\n return b\n\n return wrap(srcBatch), wrap(tgtBatch)\n\n def __len__(self):\n return self.numBatches\n\n def shuffle(self):\n data = list(zip(self.src, self.tgt))\n self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])\n","sub_path":"memories/diag_dataset.py","file_name":"diag_dataset.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"497337437","text":"load(\"@bazel_skylib//lib:collections.bzl\", \"collections\")\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\n \"@fbcode_macros//build_defs/lib:python_typing.bzl\",\n \"gen_typing_config\",\n \"get_typing_config_target\",\n)\nload(\"@fbcode_macros//build_defs/lib:src_and_dep_helpers.bzl\", \"src_and_dep_helpers\")\nload(\"@fbcode_macros//build_defs/lib:visibility.bzl\", \"get_visibility\")\nload(\"@fbcode_macros//build_defs:platform_utils.bzl\", \"platform_utils\")\nload(\"@fbsource//tools/build_defs:fb_native_wrapper.bzl\", \"fb_native\")\n\ndef _get_url_basename(url):\n \"\"\" Urls will have an #md5 etag remove it and return the wheel name\"\"\"\n return paths.basename(url).rsplit(\"#md5=\")[0]\n\ndef _is_compiled(url):\n \"\"\"\n Returns True if wheel with provided url is precompiled.\n\n The logic in this method is a less efficient version of\n -cp[0-9]{2}- regex matching.\n \"\"\"\n prefix = \"-cp\"\n start = 0\n for _ in range(len(url)):\n start = url.find(prefix, start)\n if start == -1 or start + 6 >= len(url):\n break\n if url[start + len(prefix)].isdigit() and \\\n url[start + len(prefix) + 1].isdigit() and \\\n url[start + len(prefix) + 2] == \"-\":\n return True\n start += len(prefix)\n return False\n\ndef _remote_wheel(url, out, sha1, visibility):\n remote_file_name = out + \"-remote\"\n fb_native.remote_file(\n name = remote_file_name,\n visibility = get_visibility(visibility, remote_file_name),\n out = out,\n url = url,\n sha1 = sha1,\n )\n return \":\" + remote_file_name\n\ndef _prebuilt_target(wheel, remote_target, visibility):\n fb_native.prebuilt_python_library(\n name = wheel,\n visibility = get_visibility(visibility, wheel),\n binary_src = remote_target,\n )\n return \":\" + wheel\n\ndef _override_wheels(deps, wheel_platform):\n # For all deps, override the current wheel file with the one 
corresponding\n # to the specified wheel platform.\n\n # We're doing this because platforms in the list of deps are also re.escaped.\n wheel_platform = platform_utils.escape(wheel_platform)\n\n override_urls = None\n for platform, urls in deps:\n if wheel_platform in platform:\n override_urls = urls\n\n if not override_urls:\n return deps\n\n new_deps = []\n for platform, _ in deps:\n new_deps.append((platform, override_urls))\n\n return new_deps\n\ndef python_wheel(\n version,\n platform_urls, # Dict[str, str] # platform -> url\n deps = (),\n external_deps = (),\n tests = (),\n visibility = None):\n # We don't need duplicate targets if we have multiple usage of URLs\n urls = collections.uniq(platform_urls.values())\n wheel_targets = {} # Dict[str, str] # url -> prebuilt_target_name\n\n compiled = False\n\n # Setup all the remote_file and prebuilt_python_library targets\n # urls have #sha1= at the end.\n for url in urls:\n if url == None:\n continue\n if _is_compiled(url):\n compiled = True\n orig_url, _, sha1 = url.rpartition(\"#sha1=\")\n if not sha1:\n fail(\"There is no #sha1= tag on the end of URL: \" + url)\n\n # Opensource usage of this may have #md5 tags from pypi\n wheel = _get_url_basename(orig_url)\n target_name = _remote_wheel(url, wheel, sha1, visibility)\n target_name = _prebuilt_target(wheel, target_name, visibility)\n wheel_targets[url] = target_name\n\n attrs = {}\n\n # Create the ability to override the platform that wheels use\n wheel_platform = native.read_config(\"python\", \"wheel_platform_override\")\n\n # Use platform_deps to rely on the correct wheel target for\n # each platform\n platform_deps = [\n (\"{}$\".format(platform_utils.escape(py_platform)), None if (url == None) else [wheel_targets[url]])\n for py_platform, url in sorted(platform_urls.items())\n # Some platforms just do not have wheels available. In this case, we remove\n # that platform from platform deps. You just won't get a whl on those\n # platforms. HOWEVER: Due to how platforms work in buck, if there's a\n # wheel_platform, we want to keep this platform. We keep it because a user\n # might still get something like 'gcc5-blah' as the buck native platform\n # even when we've overwritten all urls with say a mac specific url.\n # It sucks, and when select() and platform support is in buck and handled\n # properly by all rules, this will be wholly re-evaluated.\n if url or wheel_platform\n ]\n\n if wheel_platform:\n platform_deps = _override_wheels(platform_deps, wheel_platform)\n\n # This is to work around how buck instantiates toolchains. Without this,\n # we don't always end up properly instantiating the c++ toolchains if\n # the compiler is a python script. 
T34675852\n cpp_genrule_name = version + \"-genrule-hack\"\n fb_native.cxx_genrule(\n name = cpp_genrule_name,\n out = \"dummy\",\n cmd = \"echo '' > $OUT\",\n )\n deps = (deps or []) + [\":\" + cpp_genrule_name]\n\n if external_deps:\n if compiled:\n attrs[\"exclude_deps_from_merged_linking\"] = True\n platform_deps.extend(\n src_and_dep_helpers.format_platform_deps(\n [\n src_and_dep_helpers.normalize_external_dep(d, lang_suffix = \"-py\")\n for d in external_deps\n ],\n ),\n )\n\n if tests:\n attrs[\"tests\"] = tests\n\n # TODO: Figure out how to handle typing info from wheels\n if get_typing_config_target():\n gen_typing_config(version, visibility = visibility)\n fb_native.python_library(\n name = version,\n deps = deps,\n platform_deps = platform_deps,\n visibility = get_visibility(visibility, version),\n **attrs\n )\n","sub_path":"infra_macros/fbcode_macros/build_defs/python_wheel.bzl","file_name":"python_wheel.bzl","file_ext":"bzl","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"579541779","text":"import numpy as np\nimport gym\nimport random\nimport tensorflow as tf\nimport time\n\nEPISODES = 50000\nGAMMA = 0.99\nPRE_TRAIN_STEPS = 0 # Number of steps used before training updates begin.\nEPSILON = 0.1\nEPSILON_MIN = 0.1\nEPSILON_DECAY = 0.995\nLEARNING_RATE = 0.001\nTIMES = 500\nRENDER = True\n\nenv = gym.make('FrozenLake-v0')\n# env = gym.wrappers.Monitor(env, 'exp_n1')\n\nstate_dim = env.observation_space.n\naction_dim = env.action_space.n\n\ntf.reset_default_graph()\n\ninputs = tf.placeholder(shape=[1, state_dim], dtype=tf.float32)\n\nW = tf.get_variable(\n \"W\",\n shape=[state_dim, action_dim],\n initializer=tf.zeros_initializer)\n\nQ_out = tf.matmul(inputs, W)\npredict = tf.argmax(Q_out, 1)\n\nQ_next = tf.placeholder(shape=[1, action_dim], dtype=tf.float32)\nloss = tf.reduce_sum(tf.square(Q_next - Q_out))\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(loss)\n\n# 'Saver' op to save and restore all the variables\nsaver = tf.train.Saver()\n\ndef one_hot_encoding(x):\n return np.identity(state_dim)[x:x+1]\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # Restore model weights from previously saved model\n saver.restore(sess, \"save/frozen-dqn/frozen-dqn.ckpt\")\n\n total_steps = 0\n rewards = []\n for episode in range(0, EPISODES):\n state = env.reset()\n total_reward = 0\n for _ in range(TIMES):\n if RENDER:\n env.render()\n\n # Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) <= EPSILON or total_steps < PRE_TRAIN_STEPS:\n Q = sess.run(Q_out, feed_dict={inputs:one_hot_encoding(state) })\n act = env.action_space.sample()\n else:\n action, Q = sess.run([predict, Q_out], feed_dict={inputs:one_hot_encoding(state) })\n act = action[0]\n\n # Get new state and reward from environment\n next_state, reward, done, _ = env.step(act)\n\n if done:\n r = 1 if reward > 0 else -1\n else:\n r = 0\n\n Q_next_state = sess.run(Q_out, feed_dict={inputs:one_hot_encoding(next_state) })\n Q[0, act] = r + GAMMA * np.max(Q_next_state)\n\n # train the model\n sess.run(train_op, feed_dict={inputs:one_hot_encoding(state), Q_next:Q })\n\n total_reward += reward\n state = next_state\n total_steps += 1\n if done:\n print(\"episode: {}/{}, score: {}, e: {:.2f}\".format(episode + 1, EPISODES, total_reward, EPSILON))\n if total_steps > PRE_TRAIN_STEPS:\n EPSILON *= EPSILON_DECAY if EPSILON > EPSILON_MIN else 1\n 
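# (added note) decay fires only while EPSILON > EPSILON_MIN; both are 0.1 here,\n                    # so EPSILON stays constant for the entire run.\n                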
break\n\n rewards.append(total_reward)\n\n if episode > 0 and (episode + 1) % 500 == 0:\n saver.save(sess, \"save/frozen-dqn/frozen-dqn.ckpt\")\n\n if episode > 0 and (episode + 1) % 1000 == 0:\n print(\"episode: {:d}\".format(episode + 1))\n print(\"Rewards: {:.2f}\".format(sum(rewards)))\n print(\"Percent of succesful episodes: {:.2f}%\".format(sum(rewards)/EPISODES))\n time.sleep(2.5)\n","sub_path":"com/alodokter/learn/frozenDQN.py","file_name":"frozenDQN.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"516823707","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 31 11:50:15 2015\r\n\r\n@author: thoma_000\r\n\"\"\"\r\nimport sys,os\r\n\r\npyVERSION = float(sys.version[:3])\r\n\r\nif pyVERSION >= 3:\r\n\txrange = range\r\n\r\ndef ixrange(start, stop=None, step=1):\r\n\tif stop == None:\r\n\t\tstart, stop = 0, start\r\n\t\r\n\treturn xrange(start, stop+1, step)\r\n\t\r\ndef irange(start, stop=None, step=1):\r\n\tif stop == None:\r\n\t\tstart, stop = 0, start\r\n\t\r\n\treturn list(range(start, stop+1, step))\r\n\r\ndef mrange(start, stop=None, step=1):\r\n\t\"\"\" Takes mathematical indices 1,2,3,... and returns a range in the information\r\n\ttheoretical format 0,1,2,...\r\n\t\"\"\"\r\n\tif stop == None:\r\n\t\tstart, stop = 1, start\r\n\t\r\n\treturn list(range(start-1, stop, step))\r\n\t\r\ndef assert_dir(path):\r\n\t\"\"\"\r\n\tMake sure the path exists. If not, create it.\r\n\t\"\"\"\r\n\tif not os.access(path, os.F_OK):\r\n\t\tos.mkdir(path)\r\n\t\t\r\ndef isfloat(x):\r\n\ttry:\r\n\t\ttemp = float(x)\r\n\t\treturn True\r\n\texcept ValueError:\r\n\t\treturn False","sub_path":"lacommon.py","file_name":"lacommon.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"103680729","text":"#!/usr/bin/env python3.4\n\n# Eric Murphy\n# HW 7\n# murph141\n# 3/12/15\n\n# Imports\nfrom copy import deepcopy\nimport sys\nsys.path.append(\"../BitVector\")\n\nfrom BitVector import *\n\n# Modulus\nbig_pow = 2 ** 64\n\n# Generate the key structure\ndef K():\n keys = ['428a2f98d728ae22', '7137449123ef65cd', 'b5c0fbcfec4d3b2f', 'e9b5dba58189dbbc',\n '3956c25bf348b538', '59f111f1b605d019', '923f82a4af194f9b', 'ab1c5ed5da6d8118',\n 'd807aa98a3030242', '12835b0145706fbe', '243185be4ee4b28c', '550c7dc3d5ffb4e2',\n '72be5d74f27b896f', '80deb1fe3b1696b1', '9bdc06a725c71235', 'c19bf174cf692694',\n 'e49b69c19ef14ad2', 'efbe4786384f25e3', '0fc19dc68b8cd5b5', '240ca1cc77ac9c65',\n '2de92c6f592b0275', '4a7484aa6ea6e483', '5cb0a9dcbd41fbd4', '76f988da831153b5',\n '983e5152ee66dfab', 'a831c66d2db43210', 'b00327c898fb213f', 'bf597fc7beef0ee4',\n 'c6e00bf33da88fc2', 'd5a79147930aa725', '06ca6351e003826f', '142929670a0e6e70',\n '27b70a8546d22ffc', '2e1b21385c26c926', '4d2c6dfc5ac42aed', '53380d139d95b3df',\n '650a73548baf63de', '766a0abb3c77b2a8', '81c2c92e47edaee6', '92722c851482353b',\n 'a2bfe8a14cf10364', 'a81a664bbc423001', 'c24b8b70d0f89791', 'c76c51a30654be30',\n 'd192e819d6ef5218', 'd69906245565a910', 'f40e35855771202a', '106aa07032bbd1b8',\n '19a4c116b8d2d0c8', '1e376c085141ab53', '2748774cdf8eeb99', '34b0bcb5e19b48a8',\n '391c0cb3c5c95a63', '4ed8aa4ae3418acb', '5b9cca4f7763e373', '682e6ff3d6b2b8a3',\n '748f82ee5defb2fc', '78a5636f43172f60', '84c87814a1f0ab72', '8cc702081a6439ec',\n '90befffa23631e28', 'a4506cebde82bde9', 'bef9a3f7b2c67915', 'c67178f2e372532b',\n 'ca273eceea26619c', 'd186b8c721c0c207', 
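lacommon.py's helpers trade Python's half-open ranges for inclusive and 1-based conventions, which is easy to get wrong from memory. Restating the two list-returning helpers (tidied to use `is None`) with endpoint checks that follow directly from their definitions:

def irange(start, stop=None, step=1):
    if stop is None:
        start, stop = 0, start
    return list(range(start, stop + 1, step))

def mrange(start, stop=None, step=1):
    # Mathematical 1-based indices mapped onto 0-based ones.
    if stop is None:
        start, stop = 1, start
    return list(range(start - 1, stop, step))

assert irange(5) == [0, 1, 2, 3, 4, 5]    # both endpoints included
assert irange(2, 5) == [2, 3, 4, 5]
assert mrange(3) == [0, 1, 2]             # indices 1..3 become 0..2
assert mrange(2, 4) == [1, 2, 3]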
'eada7dd6cde0eb1e', 'f57d4f7fee6ed178',\n '06f067aa72176fba', '0a637dc5a2c898a6', '113f9804bef90dae', '1b710b35131c471b',\n '28db77f523047d84', '32caab7b40c72493', '3c9ebe0a15c9bebc', '431d67c49c100d4c',\n '4cc5d4becb3e42b6', '597f299cfc657e2a', '5fcb6fab3ad6faec', '6c44198c4a475817']\n\n keys_bv = [BitVector(hexstring=keys[i]) for i in range(len(keys))]\n\n return(keys_bv)\n\n\n# T1 Function\ndef T1(values):\n # Unpack the tuple\n a, b, c, d, e, f, g, h, w, k = deepcopy(values)\n\n temp = int(Ch(e.deep_copy(), deepcopy(f), deepcopy(g)))\n\n se = int(sigmaE(e.deep_copy()))\n\n # Calculate modulo addition\n val = (temp + se + int(h) + int(w) + int(k)) % big_pow\n\n return(BitVector(intVal=val, size=64))\n\n\n# T2 Function\ndef T2(values):\n # Unpack the tuple\n a, b, c, d, e, f, g, h, w, k = deepcopy(values)\n\n sa = int(sigmaA(deepcopy(a)))\n\n major = int(Maj(deepcopy(a), deepcopy(b), deepcopy(c)))\n\n # Calculate modulo addition\n val = (sa + major) % big_pow\n\n return(BitVector(intVal=val, size=64))\n\n\n# Ch function\ndef Ch(e, f, g):\n return((deepcopy(e) & deepcopy(f)) ^ (~deepcopy(e) & deepcopy(g)))\n\n\n# Major function\ndef Maj(a, b, c):\n return((deepcopy(a) & deepcopy(b)) ^ (deepcopy(a) & deepcopy(c)) ^ (deepcopy(b) & deepcopy(c)))\n\n\n# Sigma a function\ndef sigmaA(a):\n temp = (deepcopy(a) >> 28) ^ (deepcopy(a) >> 34) ^ (deepcopy(a) >> 39)\n return(temp)\n\n\n# Sigma e function\ndef sigmaE(e):\n temp = (deepcopy(e) >> 14) ^ (deepcopy(e) >> 18) ^ (deepcopy(e) >> 41)\n return(temp)\n\n\n# Sigma 0 function\ndef sigma0(value):\n temp = (deepcopy(value) >> 1) ^ (deepcopy(value) >> 8) ^ (deepcopy(value).shift_right(7))\n return(temp)\n\n\n# Sigma 1 function\ndef sigma1(value):\n temp = (deepcopy(value) >> 19) ^ (deepcopy(value) >> 61) ^ (deepcopy(value).shift_right(6))\n return(temp)\n\n\n# Defining the initialization vector\ndef IV():\n init_vect = ('6a09e667f3bcc908',\n 'bb67ae8584caa73b',\n '3c6ef372fe94f82b',\n 'a54ff53a5f1d36f1',\n '510e527fade682d1',\n '9b05688c2b3e6c1f',\n '1f83d9abfb41bd6b',\n '5be0cd19137e2179')\n\n iv_bv = [BitVector(hexstring=init_vect[i]) for i in range(8)]\n\n return(iv_bv)\n\n\n# Return the word schedule\ndef retW(message):\n master = [message[64*i:64*(i+1)] for i in range(16)]\n\n for i in range(16,80):\n temp = (int(master[i-16]) + int(sigma0(master[i-15])) + int(master[i-7]) + int(sigma1(master[i-2]))) % big_pow\n master.append(BitVector(intVal=temp, size=64))\n\n return(master)\n\n\n# Get the message from the file\ndef getMessage(file):\n with open(file) as f:\n content = f.read()\n\n # Create a bitvector out of the text\n message = BitVector(textstring = content)\n message_length = len(message)\n\n # Add the one bitvector\n one = BitVector(intVal = 1, size = 1)\n message_one = message + one\n length_message_one = len(message_one)\n\n # Calculate the number of zeros\n zeros = (1024 - 128 - length_message_one) % 1024\n zero_bv = BitVector(intVal=0, size=zeros)\n \n # Pad the message with zeros\n message_padded = message_one + zero_bv\n\n # Determine the length of the last bitvector\n length_bv = BitVector(intVal=message_length, size=128)\n\n # Create the total message\n total_message = message_padded + length_bv\n\n # Splice the message\n message_spliced = [total_message[1024*i:1024*(i+1)] for i in range(int(len(total_message) / 1024))]\n\n return(message_spliced)\n\n\n# The hash function (f)\ndef hashFunct(message, IV_val, IV_val2):\n # Unpack the values\n a, b, c, d, e, f, g, h = IV_val\n a1, b1, c1, d1, e1, f1, g1, h1 = IV_val2\n\n # Obtain the 
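getMessage above pads with a 1 bit, `(1024 - 128 - len - 1) % 1024` zero bits, and a 128-bit length field, so every padded message is a whole number of 1024-bit blocks. That invariant can be checked without BitVector, and hashlib gives a reference digest to validate the full pipeline against, much as the script's own output section does with sha512sum:

import hashlib

def padded_length_bits(msg_bits):
    # 1 marker bit + zero padding + 128-bit length field.
    zeros = (1024 - 128 - (msg_bits + 1)) % 1024
    return msg_bits + 1 + zeros + 128

assert all(padded_length_bits(n) % 1024 == 0 for n in range(4096))

# Reference oracle for any hand-rolled SHA-512 implementation.
print(hashlib.sha512(b"eric\neric\neric\n").hexdigest())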
keys\n keys = K()\n\n # Run the 80 rounds\n for i in range(80):\n values = (a, b, c, d, e, f, g, h, message[i], keys[i])\n h = deepcopy(g)\n g = deepcopy(f)\n f = deepcopy(e)\n\n temp = int(T1(deepcopy(values)))\n tval = (temp + int(deepcopy(d))) % big_pow\n e = BitVector(intVal=tval, size=64)\n d = deepcopy(c)\n c = deepcopy(b)\n b = deepcopy(a)\n\n temp2 = int(T2(deepcopy(values)))\n tval2 = (temp + temp2) % big_pow\n a = BitVector(intVal=tval2, size=64)\n\n # Add the two values with modulo arithmetic\n tempa = (int(a) + int(a1)) % big_pow\n tempb = (int(b) + int(b1)) % big_pow\n tempc = (int(c) + int(c1)) % big_pow\n tempd = (int(d) + int(d1)) % big_pow\n tempe = (int(e) + int(e1)) % big_pow\n tempf = (int(f) + int(f1)) % big_pow\n tempg = (int(g) + int(g1)) % big_pow\n temph = (int(h) + int(h1)) % big_pow\n\n # Create bit vectors with the added values\n a_bv = BitVector(intVal=tempa, size=64)\n b_bv = BitVector(intVal=tempb, size=64)\n c_bv = BitVector(intVal=tempc, size=64)\n d_bv = BitVector(intVal=tempd, size=64)\n e_bv = BitVector(intVal=tempe, size=64)\n f_bv = BitVector(intVal=tempf, size=64)\n g_bv = BitVector(intVal=tempg, size=64)\n h_bv = BitVector(intVal=temph, size=64)\n\n # Create the return values\n ret_val = (a_bv.deep_copy(), b_bv.deep_copy(), c_bv.deep_copy(), d_bv.deep_copy(), e_bv.deep_copy(), f_bv.deep_copy(), g_bv.deep_copy(), h_bv.deep_copy())\n ret_val2 = (a_bv.deep_copy(), b_bv.deep_copy(), c_bv.deep_copy(), d_bv.deep_copy(), e_bv.deep_copy(), f_bv.deep_copy(), g_bv.deep_copy(), h_bv.deep_copy())\n\n return(ret_val, ret_val2)\n\n\n# Main\nif __name__ == \"__main__\":\n # Check usage\n if(len(sys.argv) != 2):\n print(\"Usage: ./hw07 \")\n sys.exit(1)\n\n # The first argument is the file name\n file = sys.argv[1]\n\n # Get the message from the file\n message = getMessage(file)\n\n # Create two initialization vectors\n h0 = IV()\n h1 = IV()\n \n # Iterate over each message block\n for i in range(len(message)):\n message[i] = retW(message[i])\n\n # Check if it is the first run through SHA512\n if(i == 0):\n h0, h1 = hashFunct(message[i], h0, h1)\n else:\n h0, h1 = hashFunct(message[i], h0, h1)\n\n # Unpack the tuple\n a, b, c, d, e, f, g, h = h0\n\n # Create a bitvector from the unpacked values\n total_bv = a + b + c + d + e + f + g + h\n\n # Return the final hash\n print(total_bv.get_hex_string_from_bitvector())\n\n\n### OUTPUT ###\n\"\"\"\n-bash-4.1$ cat test\neric\neric\neric\n\n-bash-4.1$ ./hw07.py test\ned3abf286b3456ab670aab7577f943cf97ccb9af800aa701d83994ec6dec0a48733fc492810a5c2ebf21e721a9488c1cde05e03a0601bab182d18a\n\n-bash-4.1$ sha512sum test\ned3abf286b3456ab670aab7577f943cf97ccb9af800aa701d83994ec6dec0a48733fc492810a5c2ebf21e721a9488c1cde05e03a0601bab182d18a test\n\"\"\"\n","sub_path":"HW07/hw07.py","file_name":"hw07.py","file_ext":"py","file_size_in_byte":8461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"397198583","text":"class Movie():\n \"\"\" Movie class is a representation of a single movie\n\n An instance object of Movie class can be initialized\n using the following list of arguments:\n\n Args:\n movie_title (str): Title of the movie.\n poster_url (str): The url to the the poster graphics.\n trailer_url (str): The url to the youtube video.\n\n \"\"\"\n def __init__(self, movie_tile, poster_url, trailer_url):\n self.movie_tile = movie_tile\n self.poster_url = poster_url\n self.trailer_url = 
trailer_url","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"261377889","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 15 00:05:12 2020\n\n@author: khush\n\"\"\"\nfrom flask import Flask, render_template, Response\nimport cv2\nimport numpy as np\n#import cv2\nimport tensorflow as tf\nimport time\nfrom tensorflow.keras.models import load_model\nimport requests\n\n\nclass_names = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W','X', 'Y','Z', 'del','nothing', 'space']\n\nmodel = load_model('resnetV2.h5')\n\napp = Flask(__name__)\n\ndef adjust_gamma(image, gamma=1.0):\n\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n return cv2.LUT(image, table)\n\ncamera = cv2.VideoCapture(0) # use 0 for web camera\n# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera\n# for local webcam use cv2.VideoCapture(0)\n\ndef gen_frames(): # generate frame by frame from camera\n while True:\n # Capture frame-by-frame\n success, frame = camera.read() # read the camera frame\n cv2.rectangle(frame, (100, 50), (500, 450), (0, 0, 255))\n #cv2.imshow('frame', frame)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img = adjust_gamma(frame, gamma=0.7)\n #cv2.imshow('frame',frame)\n crop = np.array(frame[51:450, 101:500, :])\n \n dim = (200, 200)\n img = cv2.resize(crop, dim, interpolation=cv2.INTER_AREA)\n img = img.reshape((1, 200, 200, 3))\n img = img / 255.0\n predicted = model.predict(img)\n #cv2.putText(frame,class_names[np.argmax(predicted)],(0,0),cv2.FONT_HERSHEY_SIMPLEX,(0,255,255))\n no = class_names[np.argmax(predicted)]\n url = \"https://voice4mutes.firebaseio.com/ml_output.json\"\n headers = {\n 'Content-Type': 'application/json'\n }\n \n response = requests.post(url, json={\"Location\": no, \"ImageUrl\": \"jshdcb\"})\n \n print(response.text.encode('utf8'))\n #print(class_names[np.argmax(predicted)])\n \n if not success:\n break\n else:\n ret, buffer = cv2.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n') # concat frame one by one and show result\n\n\n@app.route('/video_feed')\ndef video_feed():\n #Video streaming route. 
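adjust_gamma above precomputes a 256-entry lookup table and applies it with cv2.LUT, which is far cheaper than exponentiating every pixel of every frame. The same idea in plain NumPy, so it can be unit-tested without a camera or OpenCV; fancy indexing plays the role of cv2.LUT here:

import numpy as np

def gamma_table(gamma):
    # out = 255 * (in / 255) ** (1 / gamma), tabulated for all 256 inputs.
    inv = 1.0 / gamma
    return np.array([((i / 255.0) ** inv) * 255 for i in range(256)],
                    dtype=np.uint8)

img = np.random.randint(0, 256, (4, 4), dtype=np.uint8)
out = gamma_table(0.7)[img]            # index the table with the image
assert out.shape == img.shape and out.dtype == np.uint8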
Put this in the src attribute of an img tag\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/')\ndef index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"ml-models/Integration_using_flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"87927554","text":"\"\"\"\r\nCreated on Fri Aug 31 19:24:42 2018\r\n\r\n@author: QI WANG\r\n\"\"\"\r\nimport nltk\r\nimport nltk.stem\r\nimport pandas as pd\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom nltk.stem import SnowballStemmer \r\nfrom nltk.stem.lancaster import LancasterStemmer \r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom nltk.stem import WordNetLemmatizer\r\n#from nltk.tokenize import WordPunktTokenizer\r\nfrom nltk.tokenize.mwe import MWETokenizer\r\n\r\n\r\ndef MyOpentxt(file):\r\n Name = []\r\n f = open(file,encoding='utf_8_sig')\r\n for row in f.readlines():\r\n row = row.rstrip()\r\n Name.append(row)\r\n f.close()\r\n Name = '/n'.join(Name)\r\n return Name\r\n\r\nTrump = MyOpentxt(\"Trump2016.txt\").lower()\r\nClinton = MyOpentxt(\"Clinton2016.txt\").lower()\r\n\r\n\r\ndef MyVectorization(text,Stem,TK,Punctuations,stops):\r\n Snowball_Stemmer = SnowballStemmer('english')\r\n Porter_Stemmer = PorterStemmer() \r\n Lancaster_Stemmer = LancasterStemmer()\r\n english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%','...','/','-','``','\"']\r\n stops = ['is', 'are', 'the', 'a', 'an']\r\n \r\n if Stem == 1:\r\n text = Snowball_Stemmer.stem(text) \r\n elif Stem == 2:\r\n text = Porter_Stemmer.stem(text)\r\n elif Stem == 3:\r\n text = Lancaster_Stemmer.stem(text)\r\n elif Stem == 0:\r\n text = text\r\n else: \r\n print('Error')\r\n \r\n lemmatizer = WordNetLemmatizer()\r\n text = lemmatizer.lemmatize(text)\r\n \r\n if TK == 1:\r\n text = nltk.word_tokenize(text)\r\n elif TK == 2:\r\n MWEtokenizer = MWETokenizer()\r\n text = MWEtokenizer.tokenize(text.split())\r\n \r\n if Punctuations == 1:\r\n text = [word for word in text if word not in english_punctuations]\r\n else:\r\n text = text\r\n \r\n if stops == 1:\r\n text = [word for word in text if word not in stops]\r\n else:\r\n text = text\r\n\r\n Freq = nltk.FreqDist(text)\r\n \r\n return text,Freq\r\n\r\n# Tokens \r\nTrumpT,Trump_F = MyVectorization(Trump,2,2,1,1)\r\nClintonT,Clinton_F = MyVectorization(Clinton,2,2,1,1)\r\n\r\n# Tags\r\nTrumptags = nltk.pos_tag(TrumpT)\r\nClintontags = nltk.pos_tag(ClintonT)\r\n\r\n#Words Frequency\r\ndf=pd.DataFrame(TrumpT, columns=['word'])\r\nTrumpF=df[\"word\"].value_counts()\r\n\r\ndf=pd.DataFrame(ClintonT, columns=['word'])\r\nClintonF=df[\"word\"].value_counts()\r\n\r\n\r\n# Tag Frequency\r\n\r\ndef MyTagFrq(tag):\r\n TF = []\r\n for i in range(len(tag)):\r\n TF.append(tag[i][1:])\r\n df=pd.DataFrame(TF, columns=['Tag Frequency'])\r\n TF = df['Tag Frequency'].value_counts()\r\n return TF\r\n \r\nTrumptagF = MyTagFrq(Trumptags)\r\nClintonTagF = MyTagFrq(Clintontags)\r\n\r\n\r\n\r\n#ClintonTag = []\r\n#for i in range(len(Clintontags)):\r\n# ClintonTag.append(Clintontags[i][1:])\r\n#df=pd.DataFrame(ClintonTag, columns=['Tag Frequency'])\r\n#ClintonTagF=df['Tag 
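MyVectorization above hides stemming, lemmatization, tokenization and stop-word filtering behind integer flags; note that it feeds the whole text string to the stemmer, although stemmers operate on single words, so stemming belongs after tokenization. A stripped-down sketch in that order, assuming nltk.download('punkt') has been run:

import nltk
from nltk.stem.porter import PorterStemmer

def simple_vectorize(text, stops=('is', 'are', 'the', 'a', 'an')):
    stemmer = PorterStemmer()
    tokens = nltk.word_tokenize(text.lower())
    # Stem token by token; drop stop words and bare punctuation.
    kept = [stemmer.stem(t) for t in tokens
            if t.isalpha() and t not in stops]
    return kept, nltk.FreqDist(kept)

tokens, freq = simple_vectorize("The voters are voting in the election")
print(freq.most_common(3))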
Frequency'].value_counts()","sub_path":"ConvertToVector.py","file_name":"ConvertToVector.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"31586757","text":"import csv\nimport sqlite3\n\n\ndef ocs():\n to_db = []\n with open('export.csv', 'rb') as fin:\n reader = csv.DictReader(fin)\n for i in reader:\n to_db.append((i['Computer;Network: MAC Address;'].split(\";\")[0].lower() + \".mtcdom.multimatic.com\",\n i['Computer;Network: MAC Address;'].split(\";\")[1]))\n return to_db\n\n\ncon = sqlite3.connect(\"address_list_sqlite3.db\")\ncur = con.cursor()\ncur.execute(\"CREATE TABLE IF NOT EXISTS mac_list (computer_name, mac_address, UNIQUE(computer_name, mac_address));\")\ncur.executemany(\"INSERT OR IGNORE INTO mac_list (computer_name, mac_address) VALUES (?, ?);\", ocs())\ncon.commit()\ncon.close()\n","sub_path":"server/csv_to_sqlite3.py","file_name":"csv_to_sqlite3.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"50254622","text":"import os\nimport time\n\n\nclass Downloader():\n def __init__(self):\n pass\n\n def get_url(self):\n if not os.path.exists('urls.txt'):\n os.mknod(\"urls.txt\")\n\n with open('urls.txt') as file:\n urls = [url.strip('\\n') for url in file.readlines()]\n\n if len(urls) == 0:\n print('请将待下载视频网址放于urls.txt中,每行一条')\n exit()\n\n return urls\n\n def url_parse(self, url):\n # -i 后面有空格\n command = 'you-get -i ' + url\n print('正在解析:')\n info = os.popen(command).readlines()\n for i in info:\n print(i)\n\n return info\n\n def download(self, url):\n self.dir = './download'\n command = 'you-get -o ' + self.dir + ' ' + url\n print('开始下载:')\n os.system(command)\n\nif __name__ == '__main__':\n\n pro_start_time = time.time()\n\n downloader = Downloader()\n urls = downloader.get_url()\n\n for url in urls:\n start_time = time.time()\n # downloader.url_parse(url)\n downloader.download(url)\n print(\"耗时:%d分\" % ((time.time()-start_time)/60))\n print('*'*40)\n\n print('共耗时:%d分' % ((time.time()-pro_start_time)/60))\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"24542498","text":"#\n# chainclass.py\n#\n# Copyright © 2009-2010, 2012-2013, 2015-2016 Monotype Imaging Inc. 
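csv_to_sqlite3.py above leans on a UNIQUE constraint plus INSERT OR IGNORE so the import is idempotent: re-running it against the same CSV adds nothing. The pattern in isolation, with an in-memory database and hypothetical rows:

import sqlite3

rows = [("host-a.example.com", "aa:bb:cc:dd:ee:01"),
        ("host-b.example.com", "aa:bb:cc:dd:ee:02")]

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE IF NOT EXISTS mac_list "
            "(computer_name, mac_address, UNIQUE(computer_name, mac_address))")
for _ in range(2):   # the second pass is a no-op thanks to OR IGNORE
    con.executemany("INSERT OR IGNORE INTO mac_list VALUES (?, ?)", rows)
con.commit()
assert con.execute("SELECT COUNT(*) FROM mac_list").fetchone()[0] == len(rows)
con.close()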
All Rights Reserved.\n#\n\n\"\"\"\nSupport for format 2 GSUB chaining contextual substitution tables.\n\"\"\"\n\n# System imports\nimport collections\nimport itertools\nimport operator\n\n# Other imports\nfrom fontio3 import utilities\nfrom fontio3.GSUB.effects import EffectsSummary\nfrom fontio3.opentype import pschainclass, runningglyphs\n \n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass ChainClass(pschainclass.PSChainClass):\n \"\"\"\n Objects containing format 2 chaining contextual GSUB lookups.\n \n These are dicts mapping Keys to PSLookupGroups.\n \n >>> _testingValues[0].pprint(namer=namer.testingNamer())\n Key((ClassTuple((1, 2)), ClassTuple((1,)), ClassTuple((1,))), ruleOrder=0):\n Effect #1:\n Sequence index: 0\n Lookup:\n Subtable 0 (Single substitution table):\n xyz3: xyz42\n xyz5: xyz43\n xyz7: xyz44\n Lookup flags:\n Right-to-left for Cursive: False\n Ignore base glyphs: False\n Ignore ligatures: False\n Ignore marks: False\n Sequence order (lower happens first): 0\n Class definition table (backtrack):\n xyz52: 1\n xyz53: 1\n xyz55: 2\n xyz56: 2\n Class definition table (input):\n xyz3: 1\n xyz5: 1\n xyz7: 1\n Class definition table (lookahead):\n xyz81: 1\n xyz82: 1\n xyz83: 1\n \"\"\"\n \n #\n # Class constant\n #\n \n kind = ('GSUB', 6)\n kindString = \"Chaining contextual (class) substitution table\"\n \n #\n # Methods\n #\n \n def effects(self, **kwArgs):\n raise DeprecationWarning(\n \"The effects() method is deprecated; \"\n \"please use effectsSummary() instead.\")\n \n def effectsSummary(self, **kwArgs):\n \"\"\"\n Returns an EffectsSummary object. If present, notes will be made in a\n provided memo kwArgs to allow elision of reprocessing, which should\n eliminate the combinatoric explosion.\n \n >>> obj = _testingValues[0]\n >>> memo = {}\n >>> es = obj.effectsSummary(memo=memo)\n >>> es.pprint()\n 2:\n 41\n 4:\n 42\n 6:\n 43\n >>> len(memo) # will have obj *and* the single substitution subtable\n 2\n \"\"\"\n \n memo = kwArgs.pop('memo', {})\n \n if id(self) in memo:\n return memo[id(self)]\n \n r = EffectsSummary()\n revMap = utilities.invertDictFull(self.classDefInput, asSets=True)\n \n for key, lkGroup in self.items():\n for lkRec in lkGroup:\n ci = key[1][lkRec.sequenceIndex]\n \n if ci:\n onlyWant = revMap[ci]\n else:\n onlyWant = self.coverageExtras\n \n for sub in lkRec.lookup:\n if id(sub) not in memo:\n memo[id(sub)] = sub.effectsSummary(**kwArgs)\n \n r.updateSets(memo[id(sub)], onlyWant=onlyWant)\n \n memo[id(self)] = r\n return r\n \n def run(glyphArray, **kwArgs):\n raise DeprecationWarning(\n \"The run() method is deprecated; \"\n \"please use runOne() instead.\")\n \n def runOne(self, glyphArray, startIndex, **kwArgs):\n \"\"\"\n Do the processing for a single (initial) glyph in a glyph array. 
This\n method is called by the Lookup object's run() method (and possibly by\n actions within contextual or related subtables).\n \n This method returns a pair: the new output GlyphList, and a count of\n the number of glyph indices involved (or zero, if no action happened).\n \n Note that igs is used in this method.\n \n >>> obj = _testingValues[0]\n >>> obj.pprint()\n Key((ClassTuple((1, 2)), ClassTuple((1,)), ClassTuple((1,))), ruleOrder=0):\n Effect #1:\n Sequence index: 0\n Lookup:\n Subtable 0 (Single substitution table):\n 2: 41\n 4: 42\n 6: 43\n Lookup flags:\n Right-to-left for Cursive: False\n Ignore base glyphs: False\n Ignore ligatures: False\n Ignore marks: False\n Sequence order (lower happens first): 0\n Class definition table (backtrack):\n 51: 1\n 52: 1\n 54: 2\n 55: 2\n Class definition table (input):\n 2: 1\n 4: 1\n 6: 1\n Class definition table (lookahead):\n 80: 1\n 81: 1\n 82: 1\n \n >>> ga = runningglyphs.GlyphList.fromiterable([12, 51, 77, 54, 6, 77, 80])\n >>> igsFunc = lambda *a, **k: [False, False, True, False, False, True, False]\n >>> r, count = obj.runOne(ga, 0, igsFunc=igsFunc)\n >>> count\n 0\n >>> r is ga\n True\n \n >>> r, count = obj.runOne(ga, 4, igsFunc=igsFunc)\n >>> count\n 1\n >>> r.pprint()\n 0:\n Value: 12\n originalOffset: 0\n 1:\n Value: 51\n originalOffset: 1\n 2:\n Value: 77\n originalOffset: 2\n 3:\n Value: 54\n originalOffset: 3\n 4:\n Value: 43\n originalOffset: 4\n 5:\n Value: 77\n originalOffset: 5\n 6:\n Value: 80\n originalOffset: 6\n \"\"\"\n \n # We pop the igsFunc because the lookups we're going to call to do the\n # effects might have different flags.\n \n igsFunc = kwArgs.pop('igsFunc')\n igs = igsFunc(glyphArray, **kwArgs)\n useLLOrder = kwArgs.get('useLLOrder', True)\n firstGlyph = glyphArray[startIndex]\n cdBack = self.classDefBacktrack\n cdIn = self.classDefInput\n cdLook = self.classDefLookahead\n \n # Find all non-ignorables (not just starting with startIndex, since we\n # potentially need backtrack here too...)\n \n v = [\n (g, i)\n for i, g in enumerate(glyphArray)\n if (not igs[i])]\n \n vNonIgs = [x[0] for x in v]\n vBackMap = [x[1] for x in v]\n startIndexNI = vBackMap.index(startIndex)\n \n for key in self:\n if cdIn.get(firstGlyph, 0) != key[1][0]:\n continue\n \n backLen, inLen, lookLen = [len(x) for x in key]\n totalLen = backLen + inLen + lookLen\n \n if backLen > startIndexNI:\n continue\n \n if (inLen + lookLen) > (len(vNonIgs) - startIndexNI):\n continue\n \n pieceStart = startIndexNI - backLen\n pieceBack = [cdBack.get(x, 0) for x in vNonIgs[pieceStart:pieceStart+backLen]]\n pieceIn = [cdIn.get(x, 0) for x in vNonIgs[pieceStart+backLen:pieceStart+backLen+inLen]]\n pieceLook = [cdLook.get(x, 0) for x in vNonIgs[pieceStart+backLen+inLen:pieceStart+totalLen]]\n \n if not all(a == b for a, b in zip(pieceBack+pieceIn+pieceLook, sum(key, ()))):\n continue\n \n # If we get here the key is a match\n \n r = glyphArray.fromiterable(glyphArray) # preserves offsets\n \n if useLLOrder:\n v = [(obj, obj.lookup.sequence) for obj in self[key]]\n it = [t[0] for t in sorted(v, key=operator.itemgetter(1))]\n \n else:\n it = self[key]\n \n count = vBackMap[startIndexNI + inLen - 1] - vBackMap[startIndexNI] + 1\n \n for effIndex, eff in enumerate(it):\n rNew, subCount = eff.lookup.runOne_GSUB(\n r,\n startIndex = vBackMap[startIndexNI + eff.sequenceIndex],\n **kwArgs)\n \n if not subCount:\n continue\n \n # The effect's Lookup did something. 
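runOne above matches a rule by projecting out ignorable glyphs, locating the start glyph in the projected stream, and comparing one concatenated backtrack+input+lookahead class window. The core of that match reduced to plain lists; for simplicity this sketch uses a single class map where the real table keeps three separate ones:

def matches(glyphs, igs, classdef, key, start):
    # Keep non-ignorables, remembering original positions.
    vis = [(g, i) for i, g in enumerate(glyphs) if not igs[i]]
    classes = [classdef.get(g, 0) for g, _ in vis]
    back, inp, look = key
    ni = next(k for k, (_, i) in enumerate(vis) if i == start)
    if ni < len(back) or ni + len(inp) + len(look) > len(classes):
        return False          # not enough context on one side
    window = classes[ni - len(back): ni + len(inp) + len(look)]
    return window == list(back) + list(inp) + list(look)

igs = [False, False, True, False, False, True, False]
cd = {51: 1, 54: 2, 2: 1, 4: 1, 6: 1, 80: 1}
assert matches([12, 51, 77, 54, 6, 77, 80], igs, cd, ((1, 2), (1,), (1,)), 4)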
This might affect the igs\n # so they need to be recalculated, and the vNonIgs and vBackMap\n # then also need to be redone.\n \n if effIndex < (len(it) - 1):\n igs = igsFunc(rNew, **kwArgs)\n \n v = [\n (g, i)\n for i, g in enumerate(rNew)\n if (not igs[i])]\n \n vNonIgs = [x[0] for x in v]\n vBackMap = [x[1] for x in v]\n assert startIndexNI == vBackMap.index(startIndex)\n \n delta = len(rNew) - len(r)\n count += delta\n r = rNew\n \n return (r, count)\n \n return (glyphArray, 0)\n \n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3.utilities import namer\n \n def _makeTV():\n from fontio3.GSUB import single\n \n from fontio3.opentype import (\n classdef,\n lookup,\n pschainclass_classtuple,\n pschainclass_key,\n pslookupgroup,\n pslookuprecord)\n \n single_obj = single.Single({2: 41, 4: 42, 6: 43})\n lookup_obj = lookup.Lookup([single_obj], sequence=0)\n pslookuprecord_obj = pslookuprecord.PSLookupRecord(0, lookup_obj)\n pslookupgroup_obj = pslookupgroup.PSLookupGroup([pslookuprecord_obj])\n tBack = pschainclass_classtuple.ClassTuple([1, 2])\n tIn = pschainclass_classtuple.ClassTuple([1])\n tLook = pschainclass_classtuple.ClassTuple([1])\n key_obj = pschainclass_key.Key([tBack, tIn, tLook])\n cdBack = classdef.ClassDef({51: 1, 52: 1, 54: 2, 55: 2})\n cdIn = classdef.ClassDef({2: 1, 4: 1, 6: 1})\n cdLook = classdef.ClassDef({80: 1, 81: 1, 82: 1})\n \n return ChainClass(\n {key_obj: pslookupgroup_obj},\n classDefBacktrack = cdBack,\n classDefInput = cdIn,\n classDefLookahead = cdLook)\n \n _testingValues = (\n _makeTV(),)\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/fontio3/GSUB/chainclass.py","file_name":"chainclass.py","file_ext":"py","file_size_in_byte":10531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"578030690","text":"import os\n\nos.system('ls -1 CommonSampleInfo/*.txt &> temp.txt');\nlines = open('temp.txt').readlines()\nos.system('rm temp.txt')\n\nout_MC = open('SampleSummary_MC.txt','w')\nout_Sig_PairN = open('SampleSummary_Signal_PairN.txt','w')\nout_Sig_HNWR = open('SampleSummary_Signal_HNWR.txt','w')\n\nFirstLine = '# alias PD xsec nmc sumw\\n'\nout_MC.write(FirstLine)\nout_Sig_PairN.write(FirstLine)\nout_Sig_HNWR.write(FirstLine)\n\nfor line in lines:\n\n line = line.strip('\\n')\n alias = line.replace('CommonSampleInfo/','').replace('.txt','')\n\n infoline = open(line).readlines()[1]\n\n if alias.startswith(\"HNPairToJJJJ\"):\n out_Sig_PairN.write(infoline)\n elif alias.startswith(\"WR_\"):\n out_Sig_HNWR.write(infoline)\n else:\n out_MC.write(infoline)\n\nout_MC.close()\nout_Sig_PairN.close()\nout_Sig_HNWR.close()\n","sub_path":"data/v949cand2_1/Sample/2017/Summarize.py","file_name":"Summarize.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"421866612","text":"#practicing with lists 10/1/20\r\n\r\n\r\n\r\n\r\n\"\"\"destructive methods of adding values:\r\nuse append, insert, or += \"\"\"\r\nlst = [1, 2, 3]\r\nlst.append(4)\r\nprint(lst) #[1, 2, 3, 4]\r\n\r\nlst2 = [1, 2, 3]\r\nlst2.insert(1, 2)\r\nprint(lst2) #[1, 2, 2, 3]\r\n\r\n\r\nlst3 = [1, 2, 3]\r\nlst3 += [10, 20]\r\nprint(lst3) #[1, 2, 3, 10, 20]\r\n\r\n\r\n\r\n\"\"\"destructive methods of removing values:\"\"\"\r\nlst = [1, 2, 
3]\r\nlst.remove(2)\r\nprint(lst) #[1, 3]\r\n\r\n\r\n\r\nlst1 = [1, 2, 3]\r\nlst1.pop(1)\r\nprint(lst1) #[1, 3]\r\n\r\n\r\n\r\n\r\n\"\"\"non destructive methods of adding values\"\"\"","sub_path":"List modification practice.py","file_name":"List modification practice.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"493243646","text":"import base64\nimport logging\nimport os\nimport unicodedata\nfrom urllib.error import URLError\n\nfrom youtube_dl import YoutubeDL\nfrom youtube_dl.utils import ExtractorError, DownloadError\n\nfrom core import config\nfrom core.utils import string_utils\n\nlogging.getLogger('niconico').setLevel('CRITICAL')\nlogging.getLogger('youtube_dl').setLevel('CRITICAL')\n\nk_DOWNLOADS_FOLDER_PATH = 'downloads'\n\n\nclass CustomYoutubeDL(YoutubeDL):\n def __init__(self, video, logger=None):\n params = get_ydl_options(video=video, logger=logger)\n logger.debug(\"outtmpl='{}'\".format(params['outtmpl']))\n YoutubeDL.__init__(self, params=params)\n self.video = video\n\n def download(self):\n return YoutubeDL.download(self, [self.video.url])\n\n @property\n def filename(self):\n if os.path.exists(k_DOWNLOADS_FOLDER_PATH):\n files = os.listdir(k_DOWNLOADS_FOLDER_PATH)\n filtered_list = list(filter(lambda f: self.video.video_id in f, files))\n if filtered_list and len(filtered_list) == 1:\n return filtered_list[0]\n return None\n\n @property\n def path(self):\n if self.filename:\n return '{}/{}'.format(k_DOWNLOADS_FOLDER_PATH, self.filename)\n else:\n return None\n\n def remove_local_file(self):\n os.remove(self.path)\n\n\ndef download(video, logger, storage):\n ydl = CustomYoutubeDL(video, logger)\n try:\n if ydl.download() != 0:\n raise RuntimeError('Download failed')\n if storage:\n if not ydl.path:\n logger.debug('Download completed but the file is not there; cannot upload to cloud')\n raise RetriableError\n logger.debug('Now uploading the file to cloud...')\n filename = sanitize_title(ydl.filename)\n storage.upload_file(filename, ydl.path)\n logger.debug('Upload done')\n except (URLError, ExtractorError, DownloadError, MemoryError) as e:\n if 'Niconico videos now require logging in' in str(e):\n raise LogInError\n else:\n logger.debug(e)\n raise RetriableError\n finally:\n if storage and ydl.path:\n ydl.remove_local_file()\n\n\ndef sanitize_title(title):\n title = unicodedata.normalize('NFKC', title)\n for key, value in config.global_instance['title_sanitization'].items():\n if key in title:\n title = title.replace(key, value)\n while ' ' in title:\n title = title.replace(' ', ' ')\n return title\n\n\ndef encode_title(video_title):\n return base64.urlsafe_b64encode(str.encode(video_title)).decode()\n\n\ndef decode_title(b64str):\n return base64.urlsafe_b64decode(b64str.encode()).decode()\n\n\ndef get_ydl_options(video, logger=None):\n title = string_utils.multi_replace(video.title, {'/': '-', '%': '%'}) if video.title else '%(title)s'\n options = {\n 'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',\n 'outtmpl': '{}/%(upload_date)s-{}-%(id)s.%(ext)s'.format(k_DOWNLOADS_FOLDER_PATH, title),\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': config.global_instance['convert_to'] if 'convert_to' in config.global_instance else 'm4a',\n 'preferredquality': '320',\n }]\n }\n\n if logger:\n cl = logger if type(logger) is CustomLogger else CustomLogger(logger=logger)\n options['logger'] = cl\n options['progress_hooks']: [cl.hook]\n\n if 
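sanitize_title above normalizes to NFKC, applies a replacement map from config, and collapses repeated spaces; as printed, the `while ' ' in title` loop replaces a space with a space and would never terminate, so the doubled spaces were almost certainly folded somewhere along the way. A self-contained sketch with a hypothetical replacement map:

import base64
import unicodedata

REPLACEMENTS = {"/": "-"}     # hypothetical; the real map comes from config

def sanitize_title(title):
    title = unicodedata.normalize("NFKC", title)
    for key, value in REPLACEMENTS.items():
        title = title.replace(key, value)
    while "  " in title:                     # collapse runs of spaces
        title = title.replace("  ", " ")
    return title

def encode_title(t):
    return base64.urlsafe_b64encode(t.encode()).decode()

def decode_title(s):
    return base64.urlsafe_b64decode(s.encode()).decode()

t = sanitize_title("ＡＢＣ  /  ＤＥＦ")      # full-width chars fold to ASCII
assert t == "ABC - DEF" and decode_title(encode_title(t)) == t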
video.login_failed:\n if config.global_instance.has_nico_creds():\n nc = config.global_instance.get_random_nico_creds()\n options['username'] = nc.username\n options['password'] = nc.password\n logger.debug(video.video_id + ' login failed previously; logging in this time with ' + nc.username)\n else:\n raise LogInError(video.video_id + ' login failed previously, but nicovideo credentials not provided')\n\n return options\n\n\nclass CustomLogger:\n def __init__(self, logger):\n self.logger = logger\n self.history = []\n\n def debug(self, msg):\n self.log(self.logger.debug, msg)\n\n def warning(self, msg):\n self.log(self.logger.debug, msg)\n\n def error(self, msg):\n self.log(self.logger.debug, msg)\n\n def hook(self, d):\n pass\n\n def log(self, func, msg, append_to_history=False):\n if append_to_history:\n self.history.append(msg)\n func(msg)\n\n\nclass RetriableError(Exception):\n pass\n\n\nclass LogInError(RetriableError):\n pass\n","sub_path":"core/custom_youtube_dl.py","file_name":"custom_youtube_dl.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"557861258","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\n\r\ndef saveFile():\r\n f = filedialog.asksaveasfile(mode=\"w\", defaultextension=\".txt\")\r\n if f is None:\r\n return\r\n try:\r\n textUserWrote = str(content.get(1.0, END))\r\n f.write(textUserWrote)\r\n except:\r\n print(\"Cannot save the file\")\r\n finally:\r\n f.close()\r\n\r\ndef openFile():\r\n try:\r\n t = filedialog.askopenfile(mode=\"r\", title=\"Select File\", \r\n filetypes=[(\"All Files\", \"*.*\")])\r\n content.insert(END, t.read())\r\n except:\r\n print(\"Cannot load the file\")\r\n finally:\r\n if t:\r\n t.close()\r\n\r\ndef closeWindow():\r\n window.destroy()\r\n\r\nwindow = Tk()\r\n\r\nmainMenu = Menu(window)\r\nwindow.config(menu=mainMenu)\r\n\r\nfileMenu = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"File\", menu=fileMenu)\r\nfileMenu.add_command(label=\"Open\", command=openFile)\r\nfileMenu.add_command(label=\"Save\", command=saveFile)\r\nfileMenu.add_separator()\r\nfileMenu.add_command(label=\"Close\", command=closeWindow)\r\nmainMenu.add_command(label=\"Help\")\r\n\r\ncontent = Text(window, width=100)\r\n\r\ncontent.grid(row=0,column=0, padx=5,pady=5)\r\n\r\nwindow.mainloop()","sub_path":"_Learning/GUI/textEditor.py","file_name":"textEditor.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"92213574","text":"# 2. 计算出100以内的全部素数,将这些素数存于列表中,然后打印出列表中的这些素数\n\n# 遍历1~100之间的数,如果这个数是素数,加入到一个列表中\nL = [] # 此容器准备加入素数\nfor x in range(1, 101):\n # 如果x是素数,则把x加入到L中,否则跳过\n isprime = True # 先假设x是素数\n # 如果x不是素数,就把isprime置为False\n if x < 2:\n isprime = False\n else:\n for i in range(2, x):\n if x % i == 0: # 整除不是素数\n isprime = False\n break\n if isprime: # 一定为素数\n L.append(x)\n\nprint(\"L = \", L)","sub_path":"python/python/老师笔记/python/day07/day07/day06_exercise/prime_100.py","file_name":"prime_100.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"425741313","text":"from rest_framework import serializers\nfrom . 
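prime_100.py above (its comments, translated: collect the primes below 100 into a list, then print the list) tests every divisor from 2 to x-1. Trial division only needs divisors up to the square root, since divisors pair up as (i, x // i); the same scan with that cutoff:

L = []
for x in range(2, 101):
    isprime = True
    i = 2
    while i * i <= x:        # checking past sqrt(x) is redundant
        if x % i == 0:
            isprime = False
            break
        i += 1
    if isprime:
        L.append(x)

assert L[:5] == [2, 3, 5, 7, 11] and len(L) == 25   # 25 primes below 100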
import models\nfrom interests.models import Interest\n\nclass UserSerializer(serializers.ModelSerializer):\n interests = serializers.SlugRelatedField(\n many=True,\n read_only=False,\n slug_field='name',\n queryset=Interest.objects.all(),\n )\n class Meta:\n model = models.CustomUser\n fields = ('url', 'id', 'email', 'username','interests', 'pic1', 'pic2', 'pic3', 'pic4', 'pic5', 'memberships', 'description', )\n","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"57458666","text":"import os\r\nimport glob\r\nimport pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n\r\npath = r'C:\\Users\\delbe\\Downloads\\wut\\wut\\Post_grad\\UBC\\Research\\lab\\kinetic_models\\four\\parameterization\\newdb_xpp'\r\n\r\nfa_path = glob.glob(path + '/fa/*.dat') \r\nwt_path = glob.glob(path + '/wt/*.dat') \r\n\r\ndef get_xpp(path): \r\n \r\n df_list = [] \r\n \r\n for n in range(len(path)): \r\n df = pd.read_csv( path[n], sep=\"\\s+\", header = None ) \r\n df = df.rename( columns = { 5 : str( df.iloc[0, 7] ) } )\r\n df = df.iloc[:, [0, 5]]\r\n \r\n df_list.append(df) \r\n \r\n dfmerge = pd.concat(df_list, axis=1) \r\n return dfmerge \r\n\r\nfa_xpp = get_xpp(fa_path) \r\nwt_xpp = get_xpp(wt_path) \r\n\r\nsim_path = r'C:\\Users\\delbe\\Downloads\\wut\\wut\\Post_grad\\UBC\\Research\\lab\\kinetic_models\\four\\parameterization'\r\nwt_sim = sim_path + '/wt_newdb.xlsx' \r\nfa_sim = sim_path + '/fa_newdb.xlsx' \r\n\r\nwt_sim = pd.read_excel(wt_sim) \r\nfa_sim = pd.read_excel(fa_sim)\r\n\r\nwt_data = pd.read_excel( sim_path + '/wt_data.xlsx' ) \r\nfa_data = pd.read_excel( sim_path + '/fa_data.xlsx' ) \r\n\r\nwt_data_x = wt_data.iloc[:, range(0, 20, 2)] \r\nfa_data_x = fa_data.iloc[:, range(0, 22, 2)] \r\nwt_data_y = wt_data.iloc[:, range(1, 21, 2)] \r\nfa_data_y = fa_data.iloc[:, range(1, 23, 2)] \r\n\r\nfa_xpp_x = fa_xpp.iloc[:, range(0, 22, 2)]\r\nwt_xpp_x = wt_xpp.iloc[:, range(0, 22, 2)]\r\nfa_xpp_y = fa_xpp.iloc[:, range(1, 23, 2)] \r\nwt_xpp_y = wt_xpp.iloc[:, range(1, 23, 2)] \r\n\r\nfa_sim_act = fa_sim.iloc[:, range(8)] \r\nfa_sim_de = fa_sim.iloc[:, range(8, 11)]\r\nwt_sim_act = wt_sim.iloc[:, range(8)]\r\nwt_sim_de = wt_sim.iloc[:, range(8, 10)] \r\n\r\nwt_sim_act = wt_sim.iloc[:, range(8)]\r\nwt_sim_de = wt_sim.iloc[:, range(8, 10)]\r\nfa_sim_act= fa_sim.iloc[:, range(8)] \r\nfa_sim_de = fa_sim.iloc[:, range(8, 11)] \r\n\r\n#normalize xpp \r\nlast_wt = wt_xpp.iloc[28000, 5] \r\nlast_fa = fa_xpp.iloc[28000, 5] \r\n\r\n#plt.plot(wt_xpp.iloc[:, range(0, 18, 2)], fa_xpp.iloc[:, range(1, 19, 2)])\r\n\r\nfor i in range(8): \r\n #wt_xpp_y.iloc[:, i] = wt_xpp_y.iloc[:, i].apply(lambda x : x / last_wt) \r\n #wt_xpp_y.iloc[:, i].apply(lambda x : x / last_wt) \r\n wt_xpp_y.iloc[:, i] = wt_xpp_y.iloc[:, i] / last_wt \r\n #fa_xpp_y.iloc[:, i] = fa_xpp_y.iloc[:, i].apply(lambda x : x / last_fa) \r\n #fa_xpp_y.iloc[:, i].apply(lambda x : x / last_fa) \r\n fa_xpp_y.iloc[:, i] = fa_xpp_y.iloc[:, i] / last_fa \r\n\r\nprint(fa_xpp_y.iloc[:, 10]) \r\n#normalize de \r\nlast_wt = wt_xpp_y.iloc[0, 10] \r\nlast_fa = fa_xpp_y.iloc[0, 10] \r\nfor i in range(8, 11): \r\n wt_xpp_y.iloc[:, i] = wt_xpp_y.iloc[:, i] / last_wt \r\nfor i in range(8, 11):\r\n fa_xpp_y.iloc[:, i] = fa_xpp_y.iloc[:, i] / last_fa \r\n\r\nprint(fa_xpp_y.iloc[:, 10]) \r\n\r\nwt_data_hds = wt_data_y.columns.tolist() #data \r\nfa_data_hds = fa_data_y.columns.tolist() \r\nfa_xpp_hds = 
fa_xpp_y.columns.tolist() #numerical \r\nwt_xpp_hds = wt_xpp_y.columns.tolist() \r\nwt_sim_hds = wt_sim.columns.tolist() #analytical \r\nfa_sim_hds = fa_sim.columns.tolist() \r\n\r\nf1 = plt.figure() \r\nax1 = f1.add_subplot(111)\r\n\r\nf2 = plt.figure() \r\nax2 = f2.add_subplot(111) \r\n\r\nf3 = plt.figure() \r\nax3 = f3.add_subplot(111) \r\n\r\nf4 = plt.figure() \r\nax4 = f4.add_subplot(111) \r\n\r\nfor i in range(8): \r\n ax1.plot(wt_data_x.iloc[:, i], wt_data_y.iloc[:, i], c='y', ls='-', label=wt_data_hds[i]) #data \r\n ax1.plot(wt_xpp_x.iloc[:, i], wt_xpp_y.iloc[:, i], c='b', ls='--', label=wt_xpp_hds[i], lw=3 ) #numerical \r\n ax1.plot(wt_sim_act, c='r', label=wt_sim_hds[i]) #analytical \r\n \r\n ax2.plot(fa_data_x.iloc[:, i], fa_data_y.iloc[:, i], c='y', ls='-', label=fa_data_hds[i]) \r\n ax2.plot(fa_xpp_x.iloc[:, i], fa_xpp_y.iloc[:, i], c='b', ls='--', label=fa_xpp_hds[i], lw=3 ) \r\n ax2.plot(fa_sim_act, c='r', label=fa_sim_hds[i]) \r\n \r\n #labels\r\n \r\nfor g in range(8, 10):\r\n ax3.plot(wt_data_x.iloc[:, g], wt_data_y.iloc[:, g], c='y', ls='-', label=wt_data_hds[g]) \r\n ax3.plot(wt_xpp_x.iloc[range(2001), g+1], wt_xpp_y.iloc[range(2001), g+1], c='b', ls='--', label=wt_xpp_hds[g], lw=3 ) \r\n ax3.plot(wt_sim_de, c='r', label=wt_sim_hds[g]) \r\n \r\n #labels \r\n \r\nfor h in range(8, 11): \r\n ax4.plot(fa_data_x.iloc[:, h], fa_data_y.iloc[:, h], c='y', ls='-', label=fa_data_hds[h]) \r\n ax4.plot(fa_xpp_x.iloc[:, h], fa_xpp_y.iloc[:, h], c='b', ls='--', label=fa_xpp_hds[h], lw=3 ) \r\n ax4.plot(fa_sim_de, c='r', label=fa_sim_hds[h]) \r\n \r\n #labels \r\n \r\n#print(wt_data_hds)\r\nprint(wt_xpp_hds) \r\nprint(fa_xpp_hds) \r\n\r\nax1.set_title('WT activation', fontsize=20, fontweight='bold')\r\nax2.set_title('FA activation', fontsize=20, fontweight='bold') \r\nax3.set_title('WT deactivation', fontsize=20, fontweight='bold') \r\nax4.set_title('FA deactivation', fontsize=20, fontweight='bold') \r\n\r\nfor x in [ax1, ax2, ax3, ax4]: \r\n x.set_ylabel('Normalized Open Probability', fontsize=14, fontweight='bold')\r\n x.set_xlabel('Time (ms)', fontsize=14, fontweight='bold')\r\n\r\nax3.set_title('WT deactivation', fontsize=20, fontweight='bold') \r\n\r\nplt.show() \r\n","sub_path":"xpp_plot.py","file_name":"xpp_plot.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"315883991","text":"# Простой парсер Hacker News для поиска на нем репозиториев github\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nx = 0\nwhile True:\n if x == 0:\n url = \"https://news.ycombinator.com/newest\"\n else:\n url = \"https://news.ycombinator.com/newest\" + nexx\n request = requests.get(url)\n\n soup = BeautifulSoup(request.text, \"html.parser\")\n\n teme = soup.find_all(\"td\", class_=\"title\")\n\n for temes in teme:\n\n temes = temes.find(\"a\", {'class':'storylink'})\n\n if temes is not None and 'github.com' in str(temes):\n sublink = temes.get('href')\n print(str(temes.text) + \" \" + str(sublink))\n print(\"===\")\n\n nex = soup.find(class_ = \"morelink\")\n nexlink = nex.get('href')\n\n nexx = nexlink[6:]\n x = x+1\n\n\n\n\n","sub_path":"Parser_web/Parser_NH.py","file_name":"Parser_NH.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458162669","text":"import random\nimport math\nimport sys\n\n# use monte carlo simulation method to count integral of 'y = x^2' on [0, 1]\n\nsample_num = 10000\nif 
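xpp_plot.py above normalizes each trace by one steady-state sample (row 28000 of the reference column), column by column in a loop. pandas broadcasts a scalar across a whole frame, so the per-column loop can collapse to a single division; a sketch with made-up data:

import numpy as np
import pandas as pd

raw = pd.DataFrame(np.random.rand(100, 4), columns=list("abcd"))
ref = raw.iloc[-1, 0]          # hypothetical steady-state reference value
norm = raw / ref               # one division normalizes every column
assert np.isclose(norm.iloc[-1, 0], 1.0)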
len(sys.argv) == 2:\n sample_num = (int)(sys.argv[1])\n\nsample = [(random.uniform(0, 1), random.uniform(0, 1)) for i in range(sample_num)]\n\ncount_under_curve = 0.0\n\nfor i in range(sample_num):\n x_square = math.pow(sample[i][0], 2)\n y = sample[i][1]\n if y < x_square:\n count_under_curve += 1\n\nprint(\"count_under_curve: %d\" % count_under_curve)\nprint(\"sample_num: %d\" % sample_num)\n\nintegral_simulated = (count_under_curve / sample_num)\nprint(\"integral_simulated: \" + str(integral_simulated))\n","sub_path":"monte_carlo_simulation_count_integral.py","file_name":"monte_carlo_simulation_count_integral.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"455626451","text":"from django.db import models\nimport logging\n\n\nclass CRUDModel(models.Model):\n id = models.BigAutoField(primary_key=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n ExtraFields = {}\n\n class Meta:\n abstract = True\n\n def url(self):\n # class name to lower\n class_name = self.__class__.__name__.lower()\n # remove \".models\" from module name\n module_name = self.__class__.__module__[:-7]\n if module_name == class_name:\n return \"/{0}/{1}/\".format(class_name, self.id)\n # return '/locus/' + (self.id)+ '/'\n else:\n return \"/{0}/{1}/{2}/\".format(module_name, class_name, self.id)\n # return '/locus/address/'+ str(self.id) + '/'\n\n @classmethod\n def create(klass, values):\n try:\n obj = klass.objects.create(**values)\n return obj\n except Exception as ex:\n logging.error(ex)\n return None\n\n @classmethod\n def create_v1(klass, values):\n try:\n return klass.objects.get_or_create(**values)\n except Exception as ex:\n logging.error(ex)\n return None\n\n @classmethod\n def update(klass, values):\n try:\n id_ = values['id']\n obj = klass.objects.filter(id=id_).update(**values)\n return obj\n except Exception as ex:\n logging.error(ex)\n return None\n\n @classmethod\n def remove(klass, values):\n try:\n obj = klass.objects.filter(**values)\n if obj.exists():\n obj.delete()\n return True\n except Exception as ex:\n logging.error(ex)\n return False\n\n @classmethod\n def fetch(klass, filters, start=0, count=0):\n try:\n if count > 0:\n return klass.objects.filter(**filters)[:start:(start+count)]\n else:\n return klass.objects.filter(**filters)\n except Exception as ex:\n logging.error(ex)\n return None\n\n @classmethod\n def fetch_all(klass):\n return klass.objects.all()\n\n @classmethod\n def fetch_by_id(klass, id_):\n return klass.objects.filter(id=id_).first()\n\n\nclass CRUDModelWithUrl(CRUDModel):\n ExtraFields = {'url': 'url'}\n\n class Meta:\n abstract = True\n","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"597301846","text":"#싱글넘버\n\n#풀이 방법\n\n'''\n0^0 =>0\n4^0 =>4\n4^4 =>0\n두번 같은게 나오면 0으로 리셋\n'''\n\n# # # # # # # # # # # # # # # # # # # # #\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n result = 0\n for num in nums:\n result ^= num\n return result\n","sub_path":"리트코드(파이썬 알고리즘 인터뷰 예제)/비트조작/136.py","file_name":"136.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"17063746","text":"# Methods to load datasets from folders, preprocess them,\n# and build input functions for the 
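singleNumber above relies on two XOR identities, x ^ x == 0 and x ^ 0 == x: folding XOR across the list cancels every paired value and leaves the singleton, in O(n) time and O(1) space. The same fold written with functools:

from functools import reduce
from operator import xor

def single_number(nums):
    return reduce(xor, nums, 0)

assert single_number([4, 1, 2, 1, 2]) == 4
# XOR is commutative and associative, so order never matters.
assert single_number([2, 1, 4, 2, 1]) == 4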
estimators.\nimport tensorflow as tf\nimport glob\nimport numpy as np\nfrom tensorflow.contrib.data import Dataset\nimport random\n\ndef loadMultipleDatapaths(parent_folder_list, max_count_each_entries=None, pre_shuffle=False, do_shuffle=True, make_equal=False):\n # Pass list of folder paths\n filepaths_good = []\n filepaths_bad = []\n for parent_folder in parent_folder_list:\n folder_filepaths_good = glob.glob(\"%s/good/*.png\" % parent_folder)\n folder_filepaths_bad = glob.glob(\"%s/bad/*.png\" % parent_folder)\n\n if pre_shuffle:\n # Shuffle individual folder paths\n random.shuffle(folder_filepaths_good)\n random.shuffle(folder_filepaths_bad)\n\n if max_count_each_entries:\n folder_filepaths_good = folder_filepaths_good[:max_count_each_entries]\n folder_filepaths_bad = folder_filepaths_bad[:max_count_each_entries]\n\n filepaths_good.extend(folder_filepaths_good)\n filepaths_bad.extend(folder_filepaths_bad)\n\n # Make count of good and bad equal\n if make_equal:\n n_each = min(len(filepaths_good), len(filepaths_bad))\n filepaths_good = filepaths_good[:n_each]\n filepaths_bad = filepaths_bad[:n_each]\n\n N_good, N_bad = len(filepaths_good), len(filepaths_bad)\n\n # Set up labels\n labels = np.array([1]*N_good + [0]*N_bad, dtype=np.float64)\n\n # Shuffle all entries keeping labels and paths together.\n entries = zip(filepaths_good + filepaths_bad, labels)\n if do_shuffle:\n random.shuffle(entries)\n # Separate back into imgs / labels and return.\n imgs, labels = zip(*entries)\n return tf.constant(imgs), tf.constant(labels), len(labels), sum(labels)\n\ndef buildBothDatasets(img_paths, labels, train_test_split_percentage=0.8):\n # Split into training and test\n split = int(len(img_paths) * train_test_split_percentage)\n tr_imgs = tf.constant(img_paths[:split])\n tr_labels = tf.constant(labels[:split])\n val_imgs = tf.constant(img_paths[split:])\n val_labels = tf.constant(labels[split:])\n\n return tr_imgs, tr_labels, val_imgs, val_labels\n\ndef input_parser(img_path, label):\n # Read the img from file.\n img_file = tf.read_file(img_path)\n img_decoded = tf.image.decode_image(img_file, channels=1)\n\n return img_decoded, label\n\ndef randomize_image(img, contrast_range=[0.2,1.8], brightness_max=0.5):\n # Apply random flips/rotations and contrast/brightness changes to image\n img = tf.image.random_flip_left_right(img)\n img = tf.image.random_flip_up_down(img)\n img = tf.image.random_contrast(img, lower=contrast_range[0], upper=contrast_range[1])\n img = tf.image.random_brightness(img, max_delta=brightness_max)\n img = tf.contrib.image.rotate(img, tf.random_uniform([1], minval=-np.pi, maxval=np.pi))\n return img\n\ndef preprocessor(dataset, batch_size, dataset_length=None, is_training=False):\n if is_training and dataset_length:\n # Shuffle dataset.\n dataset = dataset.shuffle(dataset_length*2)\n\n # Load images from image paths.\n dataset = dataset.map(input_parser)\n\n if is_training:\n # Slightly randomize images.\n dataset = dataset.map(lambda img, label: (randomize_image(img), label))\n\n # Zero mean and unit normalize images, float image output.\n # TODO : Check if this needs to be applied to the predict function also\n # TODO : Does this cancel out random_brightness?\n # dataset = dataset.map(lambda img, label: (tf.image.per_image_standardization(img), label))\n \n # Bring down to 15x15 from 21x21\n dataset = dataset.map(lambda img, label: (tf.image.central_crop(img, 0.666666), label))\n\n # Batch and repeat.\n dataset = dataset.batch(batch_size)\n if is_training:\n dataset = 
dataset.repeat()\n\n return dataset\n\ndef input_fn(imgs, labels, dataset_length=None, is_training=False, batch_size=50):\n # Returns an appropriate input function for training/evaluation.\n def sub_input_fn():\n dataset = Dataset.from_tensor_slices((imgs, labels))\n # Pre-process dataset into correct form/batching/shuffle etc.\n dataset = preprocessor(dataset, batch_size, dataset_length, is_training)\n\n # Build iterator and return\n one_shot_iterator = dataset.make_one_shot_iterator()\n next_element = one_shot_iterator.get_next()\n\n # Return in a dict so the premade estimators can use it.\n return {\"x\": next_element[0]}, next_element[1]\n return sub_input_fn","sub_path":"training_pipeline/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"518734357","text":"import requests\r\nfrom shodan import Shodan\r\nimport os\r\nimport threading\r\nfrom urllib.parse import urlparse\r\nimport webbrowser\r\nimport csv\r\n\r\nclass CamScan:\r\n \r\n def __init__(self, dirname='Images', search=None,\r\n path=None, timeout=4, pages=0, verbose=False):\r\n\r\n self.search = search\r\n self.path = path\r\n self.dirname = dirname\r\n self.timeout = timeout\r\n self.pages = pages\r\n self.verbose = verbose\r\n self.api = None\r\n \r\n\r\n try:\r\n \r\n keyfile = open('shodan_api_key','r')\r\n key = keyfile.readline()\r\n keyfile.close()\r\n self.api = Shodan(key)\r\n \r\n except FileNotFoundError:\r\n \r\n print('Key file not found')\r\n \r\n\r\n DIR_NUMBER = 2\r\n while os.path.exists(self.dirname):\r\n self.dirname = self.dirname.strip('0987654321') + str(DIR_NUMBER)\r\n DIR_NUMBER += 1\r\n \r\n\r\n def initShodan(self, key):\r\n\r\n with open('shodan_api_key','w') as file:\r\n file.write(key)\r\n\r\n self.api = Shodan(key)\r\n\r\n def chooseFromCSV(self, file):\r\n\r\n if os.path.exists(file):\r\n \r\n f = open(file, newline='')\r\n data = csv.DictReader(f)\r\n\r\n searches = {}\r\n\r\n for x in data:\r\n searches[x['searchQuery']] = x['imagePath']\r\n\r\n f.close()\r\n \r\n print('CSV file input. Select search from below:\\n')\r\n\r\n y = 0\r\n for search in searches:\r\n print(str(y) + ') ' + search)\r\n y += 1\r\n\r\n choice = int(input('\\nChoose search: '))\r\n self.search = list(searches.keys())[choice]\r\n self.path = list(searches.values())[choice]\r\n\r\n else:\r\n\r\n raise FileNotFoundError\r\n\r\n\r\n def pagesCount(self):\r\n \r\n hosts = self.api.count(self.search)['total']\r\n\r\n return int(hosts / 100) + 1\r\n\r\n\r\n def setPages(self, pages):\r\n\r\n if type(pages) in [int, range, type(None)]:\r\n self.pages = pages\r\n\r\n else:\r\n raise Exception('Wrong type. 
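preprocess.py above chains map -> batch -> repeat and hands back a one-shot iterator, the tf.contrib.data idiom of its day. The same shape in the current tf.data API, sketched on toy tensors (TF 2.x, where eager iteration replaces the explicit iterator; the file names are placeholders):

import tensorflow as tf

paths = tf.constant(["a.png", "b.png", "c.png"])   # hypothetical paths
labels = tf.constant([1.0, 0.0, 1.0])

def parse(path, label):
    # Stand-in for tf.io.read_file + tf.image.decode_image.
    return tf.zeros([15, 15, 1]), label

ds = (tf.data.Dataset.from_tensor_slices((paths, labels))
        .shuffle(6)
        .map(parse)
        .batch(2))
for images, y in ds:               # eager iteration, no one-shot iterator
    print(images.shape, y.numpy())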
pages value can be set to int, range, or None')\r\n \r\n\r\n def requestAndDownload(self, url):\r\n\r\n try:\r\n\r\n r = requests.get(url, timeout=self.timeout)\r\n\r\n if r.status_code == 200:\r\n\r\n if self.verbose:\r\n print(url, ' - Success')\r\n \r\n filename = urlparse(url).netloc.replace(':','-') + '.png'\r\n\r\n with open(filename, 'wb') as img:\r\n img.write(r.content)\r\n\r\n else:\r\n if self.verbose:\r\n print(url, r.status_code, 'Error')\r\n\r\n except requests.exceptions.ReadTimeout:\r\n if self.verbose:\r\n print(url, '- Timed out')\r\n\r\n except Exception as e:\r\n #print(e)\r\n if self.verbose:\r\n print(url, '- Connection Error')\r\n\r\n def runOnPage(self, pagenumber):\r\n\r\n results = None\r\n tries = 0\r\n\r\n while results is None and tries < 10:\r\n\r\n try:\r\n results = self.api.search(self.search, page=pagenumber)\r\n\r\n except Exception as e:\r\n tries += 1\r\n print('Shodan error')\r\n if tries == 10:\r\n print('Giving up')\r\n raise Exception(e.args[0])\r\n\r\n threads = []\r\n\r\n for result in results['matches']:\r\n\r\n url = 'http://' + str(result['ip_str']) + ':' + str(result['port']) + self.path\r\n x = threading.Thread(target=self.requestAndDownload, args=(url,))\r\n threads.append(x)\r\n x.start()\r\n\r\n for thread in threads:\r\n thread.join()\r\n \r\n\r\n def run(self):\r\n\r\n if self.api == None:\r\n raise Exception('No Shodan key')\r\n \r\n os.mkdir(self.dirname)\r\n os.chdir(self.dirname)\r\n\r\n print('Saving images to', os.getcwd(), '\\n')\r\n\r\n if self.pages is None:\r\n\r\n print('Running every page')\r\n\r\n for page in range(self.pagesCount()):\r\n print('Starting page:', page)\r\n self.runOnPage(page)\r\n\r\n elif type(self.pages) is int:\r\n\r\n print('Running page', self.pages)\r\n\r\n self.runOnPage(self.pages)\r\n \r\n elif type(self.pages) is range:\r\n\r\n for page in self.pages:\r\n print('Starting page:', page)\r\n self.runOnPage(page)\r\n\r\n\r\n def generatePage(self):\r\n\r\n html = '''\r\n\r\n\r\n\r\n Saved Images\r\n \r\n\r\n\r\n \r\n
    <h1>Click on an image to open stream</h1>
\r\n'''\r\n\r\n with open('images.html', 'w') as page:\r\n\r\n page.write(html)\r\n\r\n for name_of_file in os.listdir():\r\n\r\n if '.png' in name_of_file:\r\n \r\n link = 'http://' + name_of_file.replace('-', ':').strip('.png')\r\n\r\n page.write('\\n\\t'.format(link))\r\n page.write('\\n\\t\\t'.format(name_of_file))\r\n page.write('\\n\\t')\r\n \r\n page.write('\\n\\n')\r\n \r\n \r\n def showImages(self):\r\n webbrowser.open('images.html')\r\n \r\n\r\n def info(self):\r\n\r\n print('search:', self.search)\r\n print('path:', self.path)\r\n print('dirname', self.dirname)\r\n print('timeout:', self.timeout)\r\n print('pages:', self.pages)\r\n","sub_path":"CamScan.py","file_name":"CamScan.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"274191177","text":"from aws_cdk import (\n aws_lambda as lb,\n aws_apigateway as apigw,\n core\n)\n\nclass LambdaStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n lambda_function = lb.Function(self, 'helloworldfunction',\n runtime = lb.Runtime.PYTHON_3_9,\n code = lb.Code.asset('lambda'),\n handler = 'main.handler'\n )\n\n api_gateway = apigw.LambdaRestApi(self, 'helloworld',\n handler = lambda_function,\n rest_api_name = 'mylambdaapi'\n )\n","sub_path":"stacks/lambda_stack.py","file_name":"lambda_stack.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"41379267","text":"# Copyright (c) 2013, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass ApplicationVersion(AWSProperty):\n props = {\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n 'VersionLabel': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass ConfigurationTemplate(AWSProperty):\n props = {\n 'TemplateName': (basestring, True),\n 'Description': (basestring, False),\n 'OptionSettings': (list, False),\n 'SolutionStackName': (basestring, False),\n }\n\n\nclass Application(AWSObject):\n props = {\n 'ApplicationVersions': (list, True),\n 'ConfigurationTemplates': (list, False),\n 'Description': (basestring, False),\n }\n\n def __init__(self, name, **kwargs):\n self.type = \"AWS::ElasticBeanstalk::Application\"\n sup = super(Application, self)\n sup.__init__(name, self.type, \"Properties\", self.props, **kwargs)\n\n\nclass Environment(AWSObject):\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, False),\n 'OptionSettings': (list, False),\n 'OptionsToRemove': (list, False),\n 'SolutionStackName': (basestring, False),\n 'TemplateName': (basestring, False),\n 'VersionLabel': (basestring, False),\n }\n\n def __init__(self, name, **kwargs):\n self.type = \"AWS::ElasticBeanstalk::Environment\"\n sup = super(Environment, self)\n sup.__init__(name, self.type, \"Properties\", self.props, 
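CamScan.runOnPage above is a scatter-gather: one thread per host, then join them all before moving to the next Shodan page. The pattern in isolation, with a stub in place of the HTTP download and documentation-range test addresses:

import threading

def fetch(url, results):
    # Stub for requests.get + file write; list.append is atomic under the GIL.
    results.append(url)

urls = ["http://203.0.113.%d/snapshot.cgi" % i for i in range(1, 6)]
results = []
threads = [threading.Thread(target=fetch, args=(u, results)) for u in urls]
for t in threads:
    t.start()
for t in threads:
    t.join()                 # gather: block until every worker finishes
assert sorted(results) == sorted(urls)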
**kwargs)\n","sub_path":"troposphere/elasticbeanstalk.py","file_name":"elasticbeanstalk.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"67950510","text":"import json\nimport sys\nimport os\n\n\ndef load_data(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath,'r') as raw_json_file:\n return json.load(raw_json_file)\n\n\ndef pretty_print_json(json_data):\n print(json.dumps(json_data, indent=4, ensure_ascii=False, sort_keys=True))\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n filepath = sys.argv[1]\n source_json = load_data(filepath)\n pretty_print_json(source_json)\n else:\n print(\"Error! Enter path!\")","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"267980324","text":"from tkinter import*\r\nimport time\r\n\r\n###### VENTANA INICIAL :(VI)\r\nvnt = Tk()\r\nvnt.title(\"PINBALL EXTREME\")\r\nvnt.geometry(\"800x300\")\r\nvnt.resizable(width=False,height=False)\r\nvnt.config(bg=\"gray\")\r\n##### Imagen de fondo del Menú \r\nfondo= PhotoImage(file=\"fondo.gif\")\r\netimagen = Label(vnt,image=fondo).place(x=0,y=0)\r\nimagen=PhotoImage(file=\"espacio.gif\")\r\n\r\n ###Manual de usuario\r\ndef manual():\r\n ma = open (\"Manual de Usuario.txt\",\"a\")\r\n ma.write(\"Pinball Extreme es un juego basado en una canica que colisona con unos obstaculos\\n\")\r\n ma.write(\"donde cada uno le brinda un puntaje, la paleta izquierda se mueve con la teclaz\\n\")\r\n ma.write(\"y la paleta derecha se mueve con la tecla v, el fin del juego es no dejar que llegue al la line limite\")\r\n ma.close()\r\n \r\npuntaje = 0\r\nvidas = 3\r\n\r\n\r\n#### VENTANA DEL JUEGO\r\ndef nueva():\r\n global puntaje\r\n global vidas\r\n global nombre\r\n global ball\r\n v = Tk()\r\n v.title(\"PINBALL EXTREME\")\r\n v.resizable(width=False,height=False) \r\n c = Canvas(v,width=700,height=600) \r\n c.pack()\r\n #### Limites del juego\r\n c.create_line(10,10,10,590)\r\n c.create_line(650,100,650,590)\r\n c.create_line(10,570,650,570,fil=\"red\")\r\n c.create_line(300,529,450,529,fill=\"white\")\r\n c.create_line(100,530,270,530,fill=\"white\")\r\n ### Creación Canica\r\n ball= { \"dx\": 5,\"dy\": -20, \"obj\":c.create_oval( 660, 550,680,570, fill= \"yellow\")}\r\n #### Variables Puntaje y Vidas: cantidad de bolas restante que posee el jugador \r\n puntaje=0\r\n vidas= 3\r\n \r\n #####################################\r\n ### FUNCION GUARDAR PARTIDA #########\r\n #####################################\r\n def game_save():\r\n global ball\r\n juego = open(\"Partidad Guardada.txt\",\"w\")\r\n ball = c.coords(ball[\"obj\"])\r\n y = \",\".join(str(i)for i in ball)\r\n juego.write(y + \"\\n\")\r\n juego.write(str(puntaje) + \"\\n\")\r\n juego.write(str(vidas) + \"\\n\")\r\n juego.write(str(nombre.get()))\r\n juego.close()\r\n ######################################\r\n ###FUNCION ENCARGADA DE MOVER BOLA####\r\n ######################################\r\n def moverBall1():\r\n global puntaje\r\n global vidas\r\n '''Esta funcion es la encargada de producir el movimiento de la bola y colisones con objetos'''\r\n x1, y1, x2, y2 = c.coords(ball[\"obj\"])\r\n x = (x1+x2)//2\r\n y = (y1+y2)//2 \r\n if x<10 or x>690:\r\n ball[\"dx\"]*=-1\r\n if y<10 or y>590:\r\n ball[\"dy\"]*=-1\r\n\r\n if ( 650 < x < 690 ) and ( y >= 100):\r\n ball[\"dx\"]*=-1\r\n\r\n if 
( 5 < x < 15 ) and ( 10 <= y <= 590 ):\r\n ball[\"dx\"]*=-1\r\n \r\n ##################################\r\n #### Colisiones entre objetos ####\r\n ##################################\r\n \r\n ###Colision primer obstaculo \r\n if ( 295 < x < 305 ) and ( 100 <= y <= 200 ):\r\n puntaje+=10\r\n ball[\"dx\"]*=-1\r\n \r\n if ( 95 <= y <= 105 ) and ( 200 <= x <= 300 ):\r\n puntaje+=10\r\n ball[\"dy\"]*=-1\r\n \r\n if ( 195 < x < 205 ) and ( 100 <= y <= 200 ):\r\n puntaje+=10\r\n ball[\"dx\"]*=-1\r\n \r\n if ( 195 <= y <= 205 ) and ( 200 <= x <= 300 ):\r\n puntaje+=10\r\n ball[\"dy\"]*=-1\r\n \r\n ###Colision segundo obstaculo \r\n if ( 595 < x < 605 ) and ( 100 <= y <= 200 ):\r\n puntaje+=20\r\n ball[\"dx\"]*=-1\r\n \r\n if ( 95 <= y <= 105 ) and ( 500 <= x <= 600 ):\r\n puntaje+=20\r\n ball[\"dy\"]*=-1\r\n \r\n if ( 495< x < 505 ) and (100 <= y <= 200):\r\n puntaje+=20\r\n ball[\"dx\"]*=-1\r\n \r\n if ( 195 <= y <= 205 ) and ( 500 <= x <= 600 ):\r\n puntaje+=20\r\n ball[\"dy\"]*=-1\r\n \r\n ###Colision tercer obstaculo\r\n \r\n if ( 105 < x < 115 ) and ( 300 <= y <= 400 ):\r\n puntaje+=15\r\n ball[\"dx\"]*=-1\r\n \r\n if ( 295 <= y <= 305 ) and ( 10 <= x <= 110 ):\r\n puntaje+=15 \r\n ball[\"dy\"]*=-1\r\n\r\n if ( 5 < x < 15 ) and ( 300 <= y <= 400 ):\r\n puntaje+=15\r\n ball[\"dx\"]*=-1\r\n\r\n if ( 395 <= y <= 405 ) and ( 10 <= x <= 110 ):\r\n puntaje+=15\r\n ball[\"dy\"]*=-1 \r\n\r\n ###Colision cuarto obstaculo\r\n\r\n if ( 545 < x < 555 ) and ( 350 <= y <= 450 ):\r\n puntaje+=25\r\n ball[\"dx\"]*=-1\r\n\r\n puntaje = puntaje +100\r\n if ( 345 <= y <= 355 ) and ( 450 <= x <= 550 ):\r\n puntaje+=25\r\n ball[\"dy\"]*=-1\r\n\r\n if ( 445 < x < 455 ) and ( 350 <= y <= 450 ):\r\n puntaje+=25\r\n ball[\"dx\"]*=-1\r\n\r\n if ( 445 <= y <= 455 ) and ( 450 <= x <= 550 ):\r\n puntaje+=25\r\n ball[\"dy\"]*=-1\r\n\r\n ###Colision paletas\r\n \r\n #if ( 525 <= y <= 535 ) and ( 100 <= x <= 270 ):\r\n ball[\"dy\"]*=-1\r\n ball[\"dx\"]*=-2\r\n #if ( 525 <= y <= 535 ) and ( 300 <= x <= 470 ):\r\n ball[\"dy\"]*=-1\r\n if ( 528 <= y <= 532 ) and ( 100 <= x <= 210 ):\r\n ball[\"dy\"]*=-1\r\n \r\n if ( 524<=y<534) and (300<=x<= 450):\r\n ball[\"dy\"]*=-1\r\n \r\n #### Disminución de vidas \r\n if (565<=y<=575) and (10 <= x <= 650):\r\n vidas = vidas - 1\r\n\r\n #if puntaje == 20:\r\n #vidas = vidas + 1\r\n \r\n ball[\"dy\"] = ball[\"dy\"] + 0.35\r\n \r\n c.move(ball[\"obj\"], ball[\"dx\"], ball[\"dy\"])\r\n v.after(70, moverBall1)\r\n\r\n ##### Indicador de Putaje \r\n punt = Label(v,text=\"Puntos Obtenidos\"+str(puntaje),bg=\"green\",relief=SOLID).place(x=400,y=20)\r\n ##### Indicador de vidas\r\n vida = Label(v,text=\"Vidas restantes\"+\"-\"+str(vidas),bg=\"green\",relief=SOLID).place(x=400,y=40)\r\n \r\n v.after(70, moverBall1)\r\n \r\n#### Nombre Usuario \r\n usuario = Label(v,text=str(nombre.get()),bg=\"green\").place(x=135,y=20)\r\n et = Label(v,text=\"NOMBRE USUARIO\",bg=\"red\").place(x=20,y=20)\r\n \r\n#### Obstaculos \r\n c.create_polygon((200,100),(200,200),(300,200),(300,100),fill=\"red\")##obstaculo 1\r\n c.create_polygon((500,100),(500,200),(600,200),(600,100),fill=\"gray\")##obstaculo 2\r\n c.create_polygon((10,300),(10,400),(110,400),(110,300),fill=\"black\")##obstaculo 3\r\n c.create_polygon((450,350),(450,450),(550,450),(550,350),fill=\"red\")##obstaculo 4\r\n \r\n vnt.destroy()\r\n ##################################\r\n #####Funcion Movimiento paletas###\r\n ##################################\r\n def mover_pal(event):\r\n '''Funcion encargada de dezplazar verticalmente las paletas'''\r\n global 
pal1,pal2\r\n tecla1 = repr(event.char)\r\n tecla2 = repr(event.char)\r\n ####### Paleta lado izquierdo#####\r\n if (tecla1==\"'z'\"): \r\n c.delete(pal1) \r\n pal1 = c.create_polygon((100,530),(100,550),(270,550),(270,530),fill=\"blue\")\r\n c.update()\r\n c.delete(pal1)\r\n \r\n c.after(115)\r\n pal1= c.create_polygon((100,550),(100,570),(270,570),(270,550),fill=\"blue\")\r\n ###### Paleta lado derecho#####\r\n if (tecla2 == \"'v'\"):\r\n c.delete(pal2)\r\n pal2 = c.create_polygon((300,530),(300,550),(470,550),(470,530),fill=\"blue\")\r\n c.update()\r\n c.delete(pal2)\r\n c.after(115)\r\n pal2 = c.create_polygon((300,550),(300,570),(470,570),(470,550),fill=\"blue\")\r\n \r\n global pal1\r\n global pal2\r\n c.bind(\"\",mover_pal)\r\n c.focus_set()\r\n \r\n ## Paletas:\r\n pal1 = c.create_polygon((100,550),(100,570),(270,570),(270,550),fill=\"blue\")\r\n pal2 = c.create_polygon((300,550),(300,570),(470,570),(470,550),fill=\"blue\")\r\n\r\n ##### BOTON GUARDAR PÁRTIDA\r\n guardar = Button(v,text=\"Guardar Partida\",bg=\"red\",command=game_save)\r\n guardar.pack()\r\n \r\n###### Botones ventana inicial\r\nnombre= StringVar()\r\net = Label(vnt,text=\"Nombre de Usuario\",bg=\"blue\").place(x=10,y=240)\r\nbnt = Button(vnt,text=\"Guardar usuario\",command=nueva,bg=\"red\").place(x=320,y=240)\r\ncam= Entry(vnt,textvariable=nombre,width=20).place(x=140,y=240)\r\nmanual = Button(vnt,text=\"Manual del Usuario\",bg=\"green\",command=manual).place(x=520,y=240)\r\n##############################################################################################\r\n######Cuandricula de guia: encargada de facilitarme el diseño y ubicación de los obstaculos###\r\n##############################################################################################\r\n##width=700\r\n## height=600\r\n## linea_distance = 100\r\n##\r\n## def cuadricula(c, line_distance):\r\n## for x in range(line_distance,width,line_distance):\r\n## c.create_line(x, 0, x, height, fill=\"red\") \r\n## for y in range(line_distance,height,line_distance):c.create_line(0, y, width, y, fill=\"red\") \r\n##\r\n## cuadricula(c,100)\r\nvnt.mainloop()\r\n\r\n\r\n\r\n","sub_path":"juego4.0.py","file_name":"juego4.0.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"523132925","text":"import numpy as np\nfrom numba import njit\n\nfrom .base import TimeSeriesTest\nfrom ._utils import _CheckInputs, compute_stat\nfrom ..independence import MGC\n\n\nclass MGCX(TimeSeriesTest):\n \"\"\"\n Compute the MGC test statistic and p-value.\n\n Attributes\n ----------\n stat : float\n The computed independence test statistic.\n pvalue : float\n The computed independence test p-value.\n \"\"\"\n\n def __init__(self, compute_distance=None, max_lag=0):\n TimeSeriesTest.__init__(self, compute_distance=compute_distance)\n self.max_lag = max_lag\n\n def _statistic(self, x, y):\n \"\"\"\n Calulates the MGC test statistic.\n\n Parameters\n ----------\n x, y : ndarray\n Input data matrices that have shapes depending on the particular\n independence tests (check desired test class for specifics).\n\n Returns\n -------\n stat : float\n The computed independence test statistic.\n \"\"\"\n check_input = _CheckInputs(\n x, y, max_lag=self.max_lag, compute_distance=self.compute_distance\n )\n x, y = check_input()\n\n stat, opt_lag = compute_stat(x, y, MGC, self.compute_distance, self.max_lag)\n self.stat = stat\n self.opt_lag = opt_lag\n\n return stat, opt_lag\n\n def test(self, 
x, y, reps=1000, workers=1):\n \"\"\"\n Calculates the MGC test p-value.\n\n Parameters\n ----------\n x, y : ndarray\n Input data matrices that have shapes depending on the particular\n independence tests (check desired test class for specifics).\n reps : int, optional\n The number of replications used in permutation, by default 1000.\n\n Returns\n -------\n pvalue : float\n The computed independence test p-value.\n \"\"\"\n check_input = _CheckInputs(\n x, y, max_lag=self.max_lag, compute_distance=self.compute_distance\n )\n x, y = check_input()\n\n return super(MGCX, self).test(x, y, reps, workers)\n","sub_path":"hyppo/time_series/mgcx.py","file_name":"mgcx.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"496129846","text":"###\n# Python program to calculate the sum of cubes using the closed-form identity\n# 1**3 + 2**3 + ... + n**3 = (n * (n + 1) / 2) ** 2\n###\n\ndef sum_cube(n):\n if n < 0:\n print(\"Incorrect input\")\n elif n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n total = n * (n + 1) // 2\n return total * total\n\nif __name__ == \"__main__\":\n print(\"Enter n to compute the sum of the first n cubes\")\n n = int(input())\n print(sum_cube(n))","sub_path":"Basic_programs/sum_cubic_mathematical.py","file_name":"sum_cubic_mathematical.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"398418938","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError\n\nclass ProductAlternative(models.Model):\n _name = 'product.alternative'\n _description = 'Alternate Product'\n\n product_tmpl_id = fields.Many2one('product.template', string='Original')\n product_alt_id = fields.Many2one('product.template', string='Product')\n default_code = fields.Char(related='product_alt_id.default_code', string=\"SKU\")\n list_price = fields.Float(related='product_alt_id.list_price', string=\"Sales Price\")\n standard_price = fields.Float(related='product_alt_id.standard_price', string=\"Cost\")\n manufacturer = fields.Many2one(related='product_alt_id.manufacturer', string=\"Manufacturer\")\n manufacturer_pref = fields.Char(related='product_alt_id.manufacturer_pref',string='Manuf. 
SKU')\n qty_available = fields.Float(related='product_alt_id.qty_available', string=\"On Hand\")\n virtual_available = fields.Float(related='product_alt_id.virtual_available', string=\"Forecasted\")\n\n @api.multi\n def unlink(self):\n for record in self:\n result = self.env.cr.execute('delete from product_alternative where product_tmpl_id = %s or product_alt_id = %s' % (record.product_alt_id.id,record.product_alt_id.id) )\n return super(ProductAlternative, self).unlink()\n\n @api.model\n def create(self, vals):\n res = super(ProductAlternative, self).create(vals)\n\n # prevent recursive calls\n if 'stop' in vals:\n return res\n\n # check for recursive data entry\n if vals['product_tmpl_id'] == vals['product_alt_id']:\n raise UserError(\"Product cannot be an alternative of itself!\")\n\n # get a list of alternates for the alternate product\n existing_alternates = self.env['product.alternative'].search([('product_tmpl_id','=',vals['product_tmpl_id']),\n ('product_alt_id','!=',vals['product_alt_id'])])\n\n for alt in existing_alternates:\n #_logger.warning(\"EXISTING ALTERNATIVE FOUND\")\n #_logger.warning(\"tmpl= \"+str(alt.product_tmpl_id.id)+\", alt=\"+ str(alt.product_alt_id.id))\n alt_exists = self.env['product.alternative'].search([('product_tmpl_id','=',alt.product_alt_id.id),\n ('product_alt_id','=',vals['product_alt_id'])])\n\n if not alt_exists:\n #_logger.warning(\"ALT DOESNT EXIST\")\n #_logger.warning(\"tmpl= \"+str(alt.product_tmpl_id.id)+\", alt=\"+ str(vals['product_alt_id']))\n new_alt={}\n new_alt['product_tmpl_id'] = alt.product_alt_id.id\n new_alt['product_alt_id'] = vals['product_alt_id']\n new_alt['stop'] = True\n res2 = self.create(new_alt)\n\n alt_inverse_exists = self.env['product.alternative'].search([('product_tmpl_id','=',vals['product_alt_id']),\n ('product_alt_id','=',alt.product_alt_id.id)])\n if not alt_inverse_exists:\n #_logger.warning(\"ALT INVERSE DOESNT EXIST\")\n #_logger.warning(\"tmpl= \"+str(vals['product_alt_id'])+\", alt=\"+ str(alt.product_alt_id.id))\n new_alt_inv={}\n new_alt_inv['product_tmpl_id'] = vals['product_alt_id']\n new_alt_inv['product_alt_id'] = alt.product_alt_id.id\n new_alt_inv['stop'] = True\n res3 = self.create(new_alt_inv)\n\n\n # check if the reverse relationship exists\n inverse_exists = self.env['product.alternative'].search([('product_tmpl_id','=',vals['product_alt_id']),\n ('product_alt_id','=',vals['product_tmpl_id'])])\n\n if not inverse_exists:\n #_logger.warning(\"CREATING ALTERNATE\")\n inverse_product = self.env['product.template'].search([('id','=',vals['product_alt_id'])])\n new_inverse = {}\n new_inverse['product_tmpl_id'] = vals['product_alt_id']\n new_inverse['product_alt_id'] = vals['product_tmpl_id']\n new_inverse['stop'] = True\n res4 = self.create(new_inverse)\n\n return res\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n alternate_ids = fields.One2many('product.alternative','product_tmpl_id','Alternates')\n\n ph_qty_available = fields.Float(\n \"Quantity On Hand (incl. alt)\",\n store=False,\n readonly=True,\n compute=\"_compute_ph_quantities\",\n search=\"_search_ph_qty_available\",\n digits=dp.get_precision(\"Product Unit of Measure\"),\n )\n\n ph_virtual_available = fields.Float(\n \"Forecasted Quantity (incl. alt)\",\n store=False,\n readonly=True,\n compute=\"_compute_ph_quantities\",\n search=\"_search_ph_virtual_available\",\n digits=dp.get_precision(\"Product Unit of Measure\"),\n )\n\n ph_incoming_qty = fields.Float(\n \"Incoming (incl. 
alt)\",\n store=False,\n readonly=True,\n compute=\"_compute_ph_quantities\",\n search=\"_search_ph_incoming_qty\",\n digits=dp.get_precision(\"Product Unit of Measure\"),\n )\n\n ph_outgoing_qty = fields.Float(\n \"Outgoing (incl. alt)\",\n store=False,\n readonly=True,\n compute=\"_compute_ph_quantities\",\n search=\"_search_ph_outgoing_qty\",\n digits=dp.get_precision(\"Product Unit of Measure\"),\n )\n\n def action_open_ph_quants(self):\n self.env[\"stock.quant\"]._merge_quants()\n self.env[\"stock.quant\"]._unlink_zero_quants()\n products = self.mapped(\"product_variant_ids\")\n products |= (\n self.mapped(\"alternate_ids\")\n .mapped(\"product_alt_id\")\n .mapped(\"product_variant_ids\")\n )\n products = self.mapped(\"alternate_ids\").mapped(\"product_alt_id\").mapped(\"product_variant_ids\")\n action = self.env.ref(\"stock.product_open_quants\").read()[0]\n action[\"domain\"] = [(\"product_id\", \"in\", products.ids)]\n action[\"context\"] = {\"search_default_internal_loc\": 1}\n return action\n\n @api.multi\n def _compute_ph_quantities(self):\n res = self._compute_ph_quantities_dict()\n for template in self:\n template.ph_qty_available = res[template.id][\"qty_available\"]\n template.ph_virtual_available = res[template.id][\"virtual_available\"]\n template.ph_incoming_qty = res[template.id][\"incoming_qty\"]\n template.ph_outgoing_qty = res[template.id][\"outgoing_qty\"]\n\n def _compute_ph_quantities_dict(self):\n self_variants = self.mapped(\"product_variant_ids\")\n self_variants |= (\n self.mapped(\"alternate_ids\").mapped(\"product_alt_id\").mapped(\"product_variant_ids\")\n )\n self_variants = self.mapped(\"alternate_ids\").mapped(\"product_alt_id\").mapped(\"product_variant_ids\")\n variants_available = self_variants._product_available()\n prod_available = {}\n for template in self:\n qty_available = 0\n virtual_available = 0\n incoming_qty = 0\n outgoing_qty = 0\n for p in variants_available.items():\n qty_available += p[1][\"qty_available\"]\n virtual_available += p[1][\"virtual_available\"]\n incoming_qty += p[1][\"incoming_qty\"]\n outgoing_qty += p[1][\"outgoing_qty\"]\n prod_available[template.id] = {\n \"qty_available\": qty_available,\n \"virtual_available\": virtual_available,\n \"incoming_qty\": incoming_qty,\n \"outgoing_qty\": outgoing_qty,\n }\n return prod_available\n\n @api.multi\n def action_open_ph_quants_unreserved(self):\n product_ids = self.mapped(\"product_variant_ids\")\n product_ids |= (\n self.mapped(\"alternate_ids\")\n .mapped(\"product_alt_id\")\n .mapped(\"product_variant_ids\")\n )\n product_ids = product_ids.ids\n quants = self.env[\"stock.quant\"].search([(\"product_id\", \"in\", product_ids)])\n quant_ids = quants.filtered(lambda x: x.quantity > x.reserved_quantity).ids\n result = self.env.ref(\"stock.product_open_quants\").read()[0]\n result[\"domain\"] = [(\"id\", \"in\", quant_ids)]\n result[\"context\"] = {\n \"search_default_locationgroup\": 1,\n \"search_default_internal_loc\": 1,\n }\n return result\n\n @api.multi\n def action_open_ph_forecast(self):\n product_ids = self.mapped(\"product_variant_ids\")\n product_ids |= (\n self.mapped(\"alternate_ids\")\n .mapped(\"product_alt_id\")\n .mapped(\"product_variant_ids\")\n )\n product_ids = self.mapped(\"alternate_ids\").mapped(\"product_alt_id\").mapped(\"product_variant_ids\")\n product_ids = product_ids.ids\n result = self.env.ref(\n \"stock.action_stock_level_forecast_report_template\"\n ).read()[0]\n result[\"domain\"] = [(\"product_id\", \"in\", product_ids)]\n result[\"context\"] = 
{\"group_by\": [\"product_id\"]}\n return result\n\n def _search_ph_qty_available(self, operator, value):\n domain = [(\"ph_qty_available\", operator, value)]\n product_variant_ids = self.env[\"product.product\"].search(domain)\n return [(\"product_variant_ids\", \"in\", product_variant_ids.ids)]\n\n def _search_ph_virtual_available(self, operator, value):\n domain = [(\"ph_virtual_available\", operator, value)]\n product_variant_ids = self.env[\"product.product\"].search(domain)\n return [(\"product_variant_ids\", \"in\", product_variant_ids.ids)]\n\n def _search_ph_incoming_qty(self, operator, value):\n domain = [(\"ph_incoming_qty\", operator, value)]\n product_variant_ids = self.env[\"product.product\"].search(domain)\n return [(\"product_variant_ids\", \"in\", product_variant_ids.ids)]\n\n def _search_ph_outgoing_qty(self, operator, value):\n domain = [(\"ph_outgoing_qty\", operator, value)]\n product_variant_ids = self.env[\"product.product\"].search(domain)\n return [(\"product_variant_ids\", \"in\", product_variant_ids.ids)]\n\n\n","sub_path":"stock_alternate/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":10207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8536432","text":"import faiss\nimport laserencoder\n\nenc = laserencoder.Encoder()\n\n\ndef indexing(infile=\"./sep_target.txt\"):\n with open(infile) as f:\n text = f.read()\n xb = enc.encode(text)\n d = 1024\n quantizer = faiss.IndexFlatL2(d)\n index = faiss.IndexIVFFlat(quantizer, d, 1014)\n index.train(xb)\n index.add(xb)\n faiss.write_index(index, \"index.faiss\")\n\n\nif __name__ == \"__main__\":\n indexing()\n","sub_path":"retrieval/indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"204245955","text":"\n\n#calss header\nclass _HEN():\n\tdef __init__(self,): \n\t\tself.name = \"HEN\"\n\t\tself.definitions = [u'an adult female chicken, often kept for its eggs', u'the female of any bird', u'a woman who is going to get married soon, when she is at a hen party (= a party with her women friends to celebrate this)', u'used as a way of talking to a woman or girl, especially a friend: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_hen.py","file_name":"_hen.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"334408091","text":"#!/usr/bin/env python3\n\n'''\n__author__ = \"Nicholas Leone\"\n__email__ = \"nicholasjinleone@gmail.com\"\n__email__ = \"nleone6@gatech.edu\"\n'''\n\n'''\nAbsolute Error of Chan Ho and Harbi Position Calculation\n\nSubscribes to localization_data_chan_ho_topic, localization_data_harbi_topic, and gazebo/model_states.\n\nCalculates error between the calculated position and the actual position of the Roomba for each localization algorithm.\n'''\n\n\nfrom logging import error\n\nfrom scipy.linalg.decomp_svd import null_space\nimport rospy\nfrom pozyx_simulation.msg import uwb_data\nfrom geometry_msgs.msg import Pose\nfrom gazebo_msgs.msg import ModelStates\n\nimport tf\nimport time\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport math\nfrom scipy.linalg import eigvals\nimport 
re\n\nall_distance = []\nall_destination_id = []\npose_x = 0 \npose_y = 0\n\nglobal sensor_pos\nsensor_pos = []\nbase_K = 0\nglobal r_coefficient, constant, c, counter\nr_coefficient = []\nconstant = []\ncounter = 0\nc = 0.299792458\n\nglobal robot_pose_x,robot_pose_y,robot_pose_z\nrobot_pose_x =0\nrobot_pose_y =0\nrobot_pose_z =0\n\nrospy.init_node('position_error_node', anonymous=True)\npub_chan_ho = rospy.Publisher('position_error_chan_ho_topic', Pose, queue_size=10)\npub_harbi = rospy.Publisher('position_error_harbi_topic', Pose, queue_size=10)\n\n\ndef subscribe_data_robot_poses(ModelStates):\n #for the get real position of robot subscribe model states topic\n global robot_pose_x, robot_pose_y, robot_pose_z\n global counter\n counter = counter + 1\n\n #gazebo/modelstate topic frequency is 100 hz. We descrese 10 hz with log method\n if counter % 100 == 0:\n counter = 0\n\n #ModelStates.pose[2] = turtlebot3 model real position on modelstates\n #robot_pose_x =ModelStates.pose[MODELSTATE_INDEX].position.x*1000\n #robot_pose_y =ModelStates.pose[MODELSTATE_INDEX].position.y*1000\n #robot_pose_z =ModelStates.pose[MODELSTATE_INDEX].position.z*1000\n\n robot_pose_x = ModelStates.pose[MODELSTATE_INDEX].position.x\n robot_pose_y = ModelStates.pose[MODELSTATE_INDEX].position.y\n robot_pose_z = ModelStates.pose[MODELSTATE_INDEX].position.z\n\ndef subscribe_data_position_error_chan_ho(calc_pose):\n error_pose = Pose()\n error_pose.position.x = np.abs(calc_pose.position.x - robot_pose_x)\n error_pose.position.y = np.abs(calc_pose.position.y - robot_pose_y)\n pub_chan_ho.publish(error_pose)\n\ndef subscribe_data_position_error_harbi(calc_pose):\n error_pose = Pose()\n error_pose.position.x = np.abs(calc_pose.position.x - robot_pose_x)\n error_pose.position.y = np.abs(calc_pose.position.y - robot_pose_y)\n pub_harbi.publish(error_pose)\n\nif __name__ == \"__main__\":\n #get uwb anchors postion\n #global sensor_pos\n\n MODELSTATE_INDEX = rospy.get_param('/pozyx_simulation/modelstate_index',2)\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/pozyx_simulation/modelstate_index'), MODELSTATE_INDEX)\n\n\n time.sleep(0.5)\n\n #get robot real position => you can change ModelStates.pose[] different robot's\n rospy.Subscriber('gazebo/model_states', ModelStates, subscribe_data_robot_poses)\n\n rospy.Subscriber('localization_data_chan_ho_topic', Pose, subscribe_data_position_error_chan_ho)\n\n rospy.Subscriber('localization_data_harbi_topic', Pose, subscribe_data_position_error_harbi)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()","sub_path":"advoard_localization/src/position_error.py","file_name":"position_error.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"248181886","text":"import base64, tarfile, os, tempfile\n\nimport utils\n\nbasedir = utils.getPackageRoot()\n\ndata_dir = os.path.join( basedir, 'data' )\nDATA_FILE = os.path.join( data_dir, 'skeleton.data' )\nSKELETON_DIR = os.path.join( data_dir, 'skeleton' )\n\ndef unpack ( destination, datafile=DATA_FILE ):\n \n with open( datafile ) as f:\n temp = tempfile.NamedTemporaryFile( delete=False, suffix='.tar.gz' )\n temp.write( base64.b64decode(f.read()) )\n temp.close()\n \n tar = tarfile.open( name=temp.name, mode='r' )\n tar.extractall(path=destination)\n tar.close()\n return destination\n\n\n\ndef _writeSkeleton ( inp, out ):\n \"\"\"\n Write a base64 representation of the scaffolding for the website\n this can 
then be decoded and unpacked via tar.\n\n \"\"\"\n\n tarout = out+'.tar.gz' \n tar = tarfile.TarFile( name=tarout, mode='w' )\n tar.add(inp, arcname='site')\n tar.close()\n data = base64.b64encode(open( tarout, 'rt' ).read())\n os.remove(tarout)\n with open( out, 'w' ) as f:\n f.write(data)\n \nif __name__ == '__main__':\n _writeSkeleton( SKELETON_DIR, DATA_FILE )\n\n","sub_path":"post/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"292361107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 8 13:10:49 2019\n\n@author: ishuu\n\"\"\"\n\nn = int(input())\ni = 1\nwhile i != n:\n for j in range(i,n+1):\n print(j, end='')\n print(\"\\n\")\n i += 1","sub_path":"Python and ML/Day 1/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"551537216","text":"#YouTube Corey Schafer\n\n#Press Q to exit the plot window\n#from matplotlib import pyplot as plt\n\n\"\"\"\n#Matplotlib Tutorial (Part 1)_ Creating and Customizing Our First Plots [720p]\n#A format string consists of marker, line, and color. fmt = \"[color][marker][line]\". Each is optional. If not provided, then default used; however, if line is given and no marker, then it's a line without markers. formatstringexample = \"k--\" is black color dash line.\nprint(plt.style.available) #print ['seaborn-ticks', 'seaborn-poster', 'seaborn-pastel', 'seaborn-colorblind', 'seaborn-darkgrid', 'seaborn-paper', 'fivethirtyeight', 'ggplot', 'seaborn-notebook', 'seaborn-white', 'seaborn-whitegrid', 'seaborn-deep', 'seaborn-talk', 'grayscale', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-muted', 'bmh', '_classic_test', 'fast', 'Solarize_Light2', 'dark_background', 'seaborn-bright', 'seaborn', 'classic']\nplt.style.use(\"fivethirtyeight\") #RM: run the matplotlib style without the attributes. 
I kept the attributes as reference.\n#plt.xkcd() #It's a method to run a matplotlib style which mimics webcomics\n#Ages values for the median salaries\nagesx = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\n# Median Developer Salaries by Age\ndevy = [38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748, 73752]\n#Median Python Developer Salaries by Age\npydevy = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, 75370, 83640]\n#Median JavaScript Developer Salaries by Age\njsdevy = [37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674, 68745, 68746, 74583]\nplt.plot(agesx, devy, color=\"#444444\", linestyle=\"--\", marker=\".\", label=\"All Devs devy legend label\")\nplt.plot(agesx, pydevy, color=\"b\", marker=\"o\", linewidth=3, label=\"Python pydevy legend label\")\nplt.plot(agesx, jsdevy, color=\"yellow\", linewidth=3, label=\"JavaScript jsdevy legend label\")\nplt.xlabel(\"Ages plt.xlabel()\")\nplt.ylabel(\"Median Salary plt.ylabel()\")\nplt.title(\"Median Salary (USD) by Age plt.title()\")\nplt.legend()\nplt.grid(True)\nplt.tight_layout() #improves padding\n#plt.legend([\"All Devs devy\",\"Python pydevy\"]) #RM: another way to display legend for which y-labels in list are in order of plt.plot()\nplt.savefig(\"savechartplot.png\")\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 2)_ Bar Charts and Analyzing Data from CSVs [720p]\n#Bar and line in one chart use both plt.bar and plt.plot\nagesx = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\ndevy = [38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748, 73752]\npydevy = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, 75370, 83640]\njsdevy = [37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674, 68745, 68746, 74583]\nplt.style.use(\"fivethirtyeight\")\n# plt.bar(agesx, devy, color=\"#444444\", label=\"All Devs devy legend label\")\n# plt.plot(agesx, pydevy, color=\"b\", marker=\"o\", linewidth=3, label=\"Python pydevy legend label\")\n# plt.plot(agesx, jsdevy, color=\"yellow\", linewidth=3, label=\"JavaScript jsdevy legend label\")\n# plt.xlabel(\"Ages plt.xlabel()\")\n# plt.ylabel(\"Median Salary plt.ylabel()\")\n# plt.title(\"Median Salary (USD) by Age plt.title()\")\n# plt.legend()\n# plt.tight_layout() #improves padding\n# plt.show()\n#Multiple bars in one chart\nimport numpy as np\n# xindexes = np.arange(len(agesx))\n# barwidth = 0.25\n# plt.bar(xindexes - barwidth, devy, width=barwidth, color=\"#444444\", label=\"All Devs devy legend label\")\n# plt.bar(xindexes, pydevy, width=barwidth, color=\"b\", label=\"Python pydevy legend label\")\n# plt.bar(xindexes + barwidth, jsdevy, width=barwidth, color=\"yellow\", label=\"JavaScript jsdevy legend label\")\n# plt.xticks(ticks=xindexes, labels=agesx) #RM: error message specifically assigning tick marks and labels plt.xticks(ticks=xindexes, labels=agesx). 
It now works because I updated matplotlib.\n# plt.xlabel(\"Ages plt.xlabel()\")\n# plt.ylabel(\"Median Salary plt.ylabel()\")\n# plt.title(\"Median Salary (USD) by Age plt.title()\")\n# plt.legend()\n# plt.tight_layout() #improves padding\n# plt.show()\n#Import CSV file csv\nimport csv\nfrom collections import Counter\n# with open(\"data.csv\") as csvfile:\n# \tcsvreader = csv.DictReader(csvfile) #reads csv file as a dictionary\n# \trow = next(csvreader)\n# \tprint(row) #print OrderedDict([('Responder_id', '1'), ('LanguagesWorkedWith', 'HTML/CSS;Java;JavaScript;Python')])\n# \tprint(row[\"LanguagesWorkedWith\"]) #print HTML/CSS;Java;JavaScript;Python\n# \tprint(row[\"LanguagesWorkedWith\"].split(\";\")) #print ['HTML/CSS', 'Java', 'JavaScript', 'Python']\t\n# \tlanguagescounter = Counter()\n# \t#for loop reads each row in csvreader, splits the language, adds to languagescounter to count\n# \tfor eachrow in csvreader:\n# \t\tlanguagescounter.update(eachrow[\"LanguagesWorkedWith\"].split(\";\"))\n# print(languagescounter) #print Counter({'JavaScript': 59218, 'HTML/CSS': 55465, 'SQL': 47544, 'Python': 36442, 'Java': 35916, 'Bash/Shell/PowerShell': 31991, 'C#': 27097, 'PHP': 23030, 'C++': 20524, 'TypeScript': 18523, 'C': 18017, 'Other(s):': 7920, 'Ruby': 7331, 'Go': 7201, 'Assembly': 5833, 'Swift': 5744, 'Kotlin': 5620, 'R': 5048, 'VBA': 4781, 'Objective-C': 4191, 'Scala': 3309, 'Rust': 2794, 'Dart': 1683, 'Elixir': 1260, 'Clojure': 1254, 'WebAssembly': 1015, 'F#': 973, 'Erlang': 777})\n# print(languagescounter.most_common(15)) #print [('JavaScript', 59218), ('HTML/CSS', 55465), ('SQL', 47544), ('Python', 36442), ('Java', 35916), ('Bash/Shell/PowerShell', 31991), ('C#', 27097), ('PHP', 23030), ('C++', 20524), ('TypeScript', 18523), ('C', 18017), ('Other(s):', 7920), ('Ruby', 7331), ('Go', 7201), ('Assembly', 5833)]\n# languagesx = []\n# popularityy = []\n# for item in languagescounter.most_common(15):\n# \tlanguagesx.append(item[0])\n# \tpopularityy.append(item[1])\n# print(languagesx) #print ['JavaScript', 'HTML/CSS', 'SQL', 'Python', 'Java', 'Bash/Shell/PowerShell', 'C#', 'PHP', 'C++', 'TypeScript', 'C', 'Other(s):', 'Ruby', 'Go', 'Assembly']\n# print(popularityy) #print [59218, 55465, 47544, 36442, 35916, 31991, 27097, 23030, 20524, 18523, 18017, 7920, 7331, 7201, 5833]\n# #Instructor says faster using zip function and unpacking values.\n# languagesx.reverse() #RM: doesn't work sorting. It works now because I updated matplotlib.\n# popularityy.reverse() #RM: doesn't work sorting. 
It works now because I updated matplotlib.\n# plt.barh(languagesx, popularityy) #horizontal bar chart\n# plt.xlabel(\"Number Of People Who Use plt.xlabel()\")\n# plt.ylabel(\"Programming Languaes plt.ylabel()\")\n# plt.title(\"Most Popular Languages plt.title()\")\n# plt.tight_layout()\n# plt.show()\n#Import CSV file Pandas\nimport pandas as pd\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\ndata = pd.read_csv(\"data.csv\")\nids = data[\"Responder_id\"]\nlanguageresponses = data[\"LanguagesWorkedWith\"]\nlanguagescounter = Counter()\nfor response in languageresponses:\n\tlanguagescounter.update(response.split(\";\"))\nprint(languagescounter) #print Counter({'JavaScript': 59218, 'HTML/CSS': 55465, 'SQL': 47544, 'Python': 36442, 'Java': 35916, 'Bash/Shell/PowerShell': 31991, 'C#': 27097, 'PHP': 23030, 'C++': 20524, 'TypeScript': 18523, 'C': 18017, 'Other(s):': 7920, 'Ruby': 7331, 'Go': 7201, 'Assembly': 5833, 'Swift': 5744, 'Kotlin': 5620, 'R': 5048, 'VBA': 4781, 'Objective-C': 4191, 'Scala': 3309, 'Rust': 2794, 'Dart': 1683, 'Elixir': 1260, 'Clojure': 1254, 'WebAssembly': 1015, 'F#': 973, 'Erlang': 777})\nprint(languagescounter.most_common(15)) #print [('JavaScript', 59218), ('HTML/CSS', 55465), ('SQL', 47544), ('Python', 36442), ('Java', 35916), ('Bash/Shell/PowerShell', 31991), ('C#', 27097), ('PHP', 23030), ('C++', 20524), ('TypeScript', 18523), ('C', 18017), ('Other(s):', 7920), ('Ruby', 7331), ('Go', 7201), ('Assembly', 5833)]\nlanguagesx = []\npopularityy = []\nfor item in languagescounter.most_common(15):\n\tlanguagesx.append(item[0])\n\tpopularityy.append(item[1])\nprint(languagesx) #print ['JavaScript', 'HTML/CSS', 'SQL', 'Python', 'Java', 'Bash/Shell/PowerShell', 'C#', 'PHP', 'C++', 'TypeScript', 'C', 'Other(s):', 'Ruby', 'Go', 'Assembly']\nprint(popularityy) #print [59218, 55465, 47544, 36442, 35916, 31991, 27097, 23030, 20524, 18523, 18017, 7920, 7331, 7201, 5833]\n#Instructor says faster using zip function and unpacking values.\nlanguagesx.reverse()\npopularityy.reverse()\nplt.barh(languagesx, popularityy) #horizontal bar chart\nplt.xlabel(\"Number Of People Who Use plt.xlabel()\")\nplt.ylabel(\"Programming Languaes plt.ylabel()\")\nplt.title(\"Most Popular Languages plt.title()\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 3)_ Pie Charts [720p]\nplt.style.use(\"fivethirtyeight\")\n# slices = [120, 80, 30, 20]\n# labels = [\"Sixty\",\"Forty\", \"Extra1\",\"Extra2\"]\n# colorslices = [\"#008fd5\",\"r\",\"yellow\",\"green\"]\n# slices = [59219, 55466, 47544, 36443, 35917, 31991, 27097, 23030, 20524, 18523, 18017, 7920, 7331, 7201, 5833]\n# labels = [\"JavaScript\", \"HTML/CSS\", \"SQL\", \"Python\", \"Java\", \"Bash/Shell/PowerShell\", \"C#\", \"PHP\", \"C++\", \"TypeScript\", \"C\", \"Other(s):\", \"Ruby\", \"Go\", \"Assembly\"]\nslicestopfive = [59219, 55466, 47544, 36443, 35917]\nlabelstopfive = [\"JavaScript\", \"HTML/CSS\", \"SQL\", \"Python\", \"Java\"]\ncolorslices = [\"#008fd5\",\"r\",\"yellow\",\"green\",\"b\"]\nexplodemovesliceout = [0, 0, 0, 0.1, 0]\nplt.pie(slicestopfive, labels=labelstopfive, colors=colorslices, explode=explodemovesliceout, shadow=True, startangle=90, autopct=\"%1.1f%%\", wedgeprops={\"edgecolor\":\"black\"}) #start angle rotates counterclockwise. 
autopct includes percentages of the total pie chart.\nplt.title(\"Top Five Programming Languages Count Of People plt.title()\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 4)_ Stack Plots [720p]\n#RM: Area plot or area chart\nplt.style.use(\"fivethirtyeight\")\ntime = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n#player1points = [1, 2, 3, 3, 4, 4, 4, 4, 5]\n#player2points = [1, 1, 1, 1, 2, 2, 2, 3, 4]\n#player3points = [1, 1, 1, 2, 2, 2, 3, 3, 3]\ndeveloper1hours = [8, 6, 5, 5, 4, 2, 1, 1, 0]\ndeveloper2hours = [0, 1, 2, 2, 2, 4, 4, 4, 4]\ndeveloper3hours = [0, 1, 1, 1, 2, 2, 3, 3, 4]\n#labelslist = [\"player1points\", \"player2points\", \"player3points\"]\nlabelslist = [\"developer1hours\", \"developer2hours\", \"developer3hours\"]\ncolorslist = [\"#6d904f\",\"#fc4f30\",\"008fd5\"]\n#plt.stackplot(time, player1points, player2points, player3points, colors=colorslist, labels=labelslist)\nplt.stackplot(time, developer1hours, developer2hours, developer3hours, colors=colorslist, labels=labelslist)\n#plt.legend(loc=\"lower left\")\nplt.legend(loc=(0.07, 0.05)) #these are percentages away from left and away from bottom.\n#plt.title(\"My Awesome Stack Plot Points Scored Per Time\")\nplt.title(\"My Awesome Stack Plot Hours Worked Per Time\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 5)_ Filling Area on Line Plots [720p]\nimport pandas as pd\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\ndata = pd.read_csv(\"data05fillbetweens.csv\")\nages = data[\"Age\"]\ndevelopersalary = data[\"All_Devs\"]\npythonsalary = data[\"Python\"]\njavascriptsalary = data[\"JavaScript\"]\nplt.plot(ages, developersalary, color=\"#444444\", linestyle=\"--\", label=\"All Developers Salary\")\nplt.plot(ages, pythonsalary, label=\"Python Salary\")\noverallmedian = 78508 #RM: Excel calculated overall median 78508. Instructor edited numbers during tutorial.\n#plt.fill_between(ages, pythonsalary, overallmedian, where=(pythonsalary > overallmedian), interpolate=True, alpha=0.25) #alpha is transparency. 1 is solid or no transparency. where denotes which qualifying areas to display transparency. interpolate fill more accurately.\n#plt.fill_between(ages, pythonsalary, overallmedian, where=(pythonsalary <= overallmedian), interpolate=True, color=\"red\", alpha=0.25) #alpha is transparency. 1 is solid or no transparency. where denotes which qualifying areas to display transparency. interpolate fill more accurately.\nplt.fill_between(ages, pythonsalary, developersalary, where=(pythonsalary > developersalary), interpolate=True, alpha=0.25, label=\"Above Average\") #alpha is transparency. 1 is solid or no transparency. where denotes which qualifying areas to display transparency. interpolate fill more accurately. These are filling between pythonsalary line and developersalary line\nplt.fill_between(ages, pythonsalary, developersalary, where=(pythonsalary <= developersalary), interpolate=True, color=\"red\", alpha=0.25, label=\"Below Average\") #alpha is transparency. 1 is solid or no transparency. where denotes which qualifying areas to display transparency. interpolate fill more accurately. 
These are filling between pythonsalary line and developersalary line\nplt.legend()\nplt.title(\"Median Salary (USD) by Age\")\nplt.xlabel(\"Ages\")\nplt.ylabel(\"Median Salary (USD)\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 6)_ Histograms [720p]\n#https://stackoverflow.com/questions/26454649/python-round-up-to-the-nearest-ten\n#https://stackoverflow.com/questions/3348825/how-to-round-integers-in-python\n#Historgrams is lower end inclusive to upper end exclusive\nimport pandas as pd\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\nfrom math import *\nplt.style.use(\"fivethirtyeight\")\nages = [18, 19, 21, 25, 26, 26, 30, 32, 38, 45, 55]\n#binslist = [10, 20, 30, 40, 50, 60]\nprint(floor(min(ages)/10.0)*10) #print 10\nprint(ceil(max(ages)/10.0)*10) #print 60\nlowestbin = floor(min(ages)/10.0)*10\nhighestbin = ceil(max(ages)/10.0)*10\nbinslist = [x for x in range(lowestbin, highestbin+1,10)]\nprint(binslist) #print [10, 20, 30, 40, 50, 60]\nplt.hist(ages, bins=binslist, edgecolor=\"black\")\nplt.title(\"Ages Of Respondents\")\nplt.xlabel(\"Ages\")\nplt.ylabel(\"Total Respondents\")\nplt.tight_layout()\nplt.show()\nbinslistexcludelowerages = [20, 30, 40, 50, 60]\nplt.title(\"Ages Of Respondents 20 and greater\")\nplt.hist(ages, bins=binslistexcludelowerages, edgecolor=\"black\")\nplt.show()\ndata = pd.read_csv(\"data06histograms.csv\")\nids = data[\"Responder_id\"]\nages = data[\"Age\"]\nbins = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\nplt.hist(ages, bins=bins, edgecolor=\"black\", log=True) #print on logarithmeic scale log=True\nmedianage = 29\nplt.axvline(medianage, color=\"#fc4f30\", label=\"Median Age\", linewidth=2) #axis vertical line\nplt.legend()\nplt.title(\"Ages Of Respondents From data06histograms.csv\")\nplt.xlabel(\"Ages\")\nplt.ylabel(\"Total Respondents\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 7)_ Scatter Plots [720p]\nimport pandas as pd\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\nplt.style.use(\"seaborn\")\nx = [5, 7, 8, 5, 6, 7, 9, 2, 3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]\ny = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 5, 1]\nplt.scatter(x, y, s=100, c=\"green\", marker=\"X\", edgecolor=\"black\", linewidth=1, alpha=0.75) #s is size of dot plots, c is color, edgecolor is the marker's border color, linewidth is the size of the border, alpha is transparency 0 is transparent to 1 is solid\nplt.tight_layout()\nplt.show()\ndifferentgraycolorforeachplot = [7, 5, 9, 7, 5, 7, 2, 5, 3, 7, 1, 2, 8, 1, 9, 2, 5, 6, 7, 5]\nplt.scatter(x, y, s=100, c=differentgraycolorforeachplot, marker=\"*\", edgecolor = \"black\", linewidth=1, alpha=1)\nplt.tight_layout()\nplt.show()\ndotplotsizes = [209, 486, 381, 255, 191, 351, 185, 228, 174, 538, 239, 394, 399, 153, 273, 293, 436, 501, 397, 539]\nplt.scatter(x, y, s=dotplotsizes, c=differentgraycolorforeachplot, cmap=\"Greens\", marker=\"*\", edgecolor = \"black\", linewidth=1, alpha=1) #cmap or color map plots different green color for each x value plot. 
Also, different sizes for each dot plots s = dotplotsizes.\ncolorbar = plt.colorbar() #colorbar to assist reading how reading the \ncolorbar.set_label(\"Colorbar Title For plt.colorbar()\")\nplt.tight_layout()\nplt.show()\nyoutubedata = pd.read_csv(\"2019-05-31-data.csv\")\nviewcount = youtubedata[\"view_count\"]\nlikes = youtubedata[\"likes\"]\nratio = youtubedata[\"ratio\"]\nplt.scatter(viewcount, likes, c=ratio, cmap=\"summer\")\ncolorbar = plt.colorbar() #colorbar to assist reading how reading the \ncolorbar.set_label(\"Colorbar Title For plt.colorbar() Like/Dislike Ratio\")\nplt.xscale(\"log\") #set the x axis and y axis scale to log between 10^6 to 10^9 in this data set\nplt.yscale(\"log\") #set the x axis and y axis scale to log between 10^6 to 10^9 in this data set\nplt.title(\"Trending YouTube Videos\")\nplt.xlabel(\"View Count\")\nplt.ylabel(\"Total Likes\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\"\"\"\n#Matplotlib Tutorial (Part 8)_ Plotting Time Series Data [720p]\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\nfrom matplotlib import dates as mpldates\nplt.style.use(\"seaborn\")\n#Instructor showed how to format dates\ndates = [datetime(2019, 5, 24), datetime(2019, 5, 25), datetime(2019, 5, 26), datetime(2019, 5, 27), datetime(2019, 5, 28), datetime(2019, 5, 29), datetime(2019, 5, 30)]\ny = [0, 1, 3, 4, 6, 5, 7]\n#plt.plot_date(dates, y) #plot a scatterplot dates is x-axis and y is y-axis\n#plt.plot_date(dates, y, linestyle=\"solid\") #plot a line chart dates is x-axis and y is y-axis\n#plt.gcf().autofmt_xdate() #gcf get current figure. autofmt automatically format to make x-axis look better.\n#dateformat = mpldates.DateFormatter(\"%b %d, %Y\") #format date to display month day, year May 24, 2019\n#plt.gca().xaxis.set_major_formatter(dateformat) #gca get current axis set the formatting for the X-axis\n#plt.show()\n\nbitcointdata = pd.read_csv(\"data08timeseries.csv\")\nbitcointdata[\"Date\"] = pd.to_datetime(bitcointdata[\"Date\"]) #RM: Currently the dates in the file is read as a string. Convert to date.\nbitcointdata.sort_values(\"Date\", inplace=True)\npricedate = bitcointdata[\"Date\"] #case sensitive\npriceclose = bitcointdata[\"Close\"] #case sensitive\nplt.plot_date(pricedate, priceclose, linestyle=\"solid\") #plot a line chart dates is x-axis and y is y-axis\nplt.gcf().autofmt_xdate() #gcf get current figure. autofmt automatically format to make x-axis look better.\nplt.title(\"Bitcoin Prices\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Closing Price\")\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\n#Matplotlib Tutorial (Part 9)_ Plotting Live Data in Real-Time [720p]\n#RM: The instructor is simulating a live chart updating every one second.\nimport random\nfrom itertools import count\nimport pandas as pd\nfrom matplotlib import pyplot as plt #RM: Python pandas must be uploaded first, matplotlib afterwards.\nfrom matplotlib.animation import FuncAnimation\n# plt.style.use(\"fivethirtyeight\")\n# xvalues = []\n# yvalues = []\n# index = count()\n# print(index) #print count(0)\n# next(index)\n# print(index) #print count(1)\n# next(index)\n# def animate(i):\n# \txvalues.append(next(index)) #RM: I wrote two print(index) and two next(index). Chart starts at x=2.\n# \tyvalues.append(random.randint(0, 5))\n# \tplt.cla() #cla instructor thinks clear axis. 
It eliminates the different color variation.\n# \tplt.plot(xvalues, yvalues)\n# variableanimate = FuncAnimation(plt.gcf(), animate, interval=1000) #gcf get current figure. interval=1000 is one second.\n# plt.tight_layout()\n# plt.show()\nplt.style.use(\"fivethirtyeight\")\n#Run data_get.py to generate a simulation of live data saved to data09livedata.csv. Run the Python code as data_get.py generates data.\nindex = count()\ndef animate(i):\n\tlivedata = pd.read_csv(\"data09livedata.csv\")\n\tx = livedata[\"x_value\"]\n\ty1 = livedata[\"total_1\"]\n\ty2 = livedata[\"total_2\"]\n\tplt.cla() #cla instructor thinks clear axis. It eliminates the different color variation.\n\tplt.plot(x, y1, label=\"Channel 1\")\n\tplt.plot(x, y2, label=\"Channel 2\")\n\tplt.legend(loc=\"upper left\")\n\tplt.tight_layout()\nvariableanimate = FuncAnimation(plt.gcf(), animate, interval=1000) #gcf get current figure. interval=1000 is one second.\nplt.tight_layout()\nplt.show()\n","sub_path":"matplotlibcoreyschafer.py","file_name":"matplotlibcoreyschafer.py","file_ext":"py","file_size_in_byte":19804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"465285491","text":"from datetime import datetime\nfrom enum import Enum\n\nfrom pydantic import BaseModel\n\nfrom sneakpeek.scraper_config import ScraperConfig\n\nUNSET_ID: int = -1\n\n\nclass ScraperSchedule(str, Enum):\n \"\"\"\n Scraper schedule options. Note that it's disallowed to have 2 concurrent\n scraper jobs, so if there's an active scraper job new job won't be scheduled\n \"\"\"\n\n INACTIVE = \"inactive\" #: Scraper won't be automatically scheduled\n EVERY_SECOND = \"every_second\" #: Scraper will be scheduled every second\n EVERY_MINUTE = \"every_minute\" #: Scraper will be scheduled every minute\n EVERY_HOUR = \"every_hour\" #: Scraper will be scheduled every hour\n EVERY_DAY = \"every_day\" #: Scraper will be scheduled every day\n EVERY_WEEK = \"every_week\" #: Scraper will be scheduled every week\n EVERY_MONTH = \"every_month\" #: Scraper will be scheduled every month\n CRONTAB = \"crontab\" #: Specify crontab when scraper should be scheduled\n\n\nclass ScraperJobPriority(Enum):\n \"\"\"Priority of the scraper job\"\"\"\n\n UTMOST = 0 #:\n HIGH = 1 #:\n NORMAL = 2 #:\n\n\nclass ScraperJobStatus(str, Enum):\n \"\"\"Scraper job status\"\"\"\n\n PENDING = \"pending\" #: Scraper job is in the queue\n #: Scraper job was dequeued by the worker and is being processed\n STARTED = \"started\"\n FAILED = \"failed\" #: Scraper job failed\n SUCCEEDED = \"succeeded\" #: Scraper job succeeded\n DEAD = \"dead\" #: Scraper job was inactive for a while, so scheduler marked it as dead and scheduler can schedule scraper again\n KILLED = \"killed\" #: Scraper job was killed by the user\n\n\nclass Scraper(BaseModel):\n \"\"\"Scraper metadata\"\"\"\n\n id: int #: Scraper unique identifier\n name: str #: Scraper name\n schedule: ScraperSchedule #: Scraper schedule configuration\n schedule_crontab: str | None #: Must be defined if schedule equals to ``CRONTAB``\n handler: str #: Name of the scraper handler that implements scraping logic\n config: ScraperConfig #: Scraper configuration that is passed to the handler\n #: Default priority to enqueue scraper jobs with\n schedule_priority: ScraperJobPriority = ScraperJobPriority.NORMAL\n\n\nclass ScraperJob(BaseModel):\n \"\"\"Scraper job metadata\"\"\"\n\n id: int #: Job unique identifier\n scraper: Scraper #: Scraper metadata\n status: ScraperJobStatus #: Scraper job status\n 
priority: ScraperJobPriority #: Scraper job priority\n created_at: datetime #: When the job was created and enqueued\n #: When the job was dequeued and started being processed by the worker\n started_at: datetime | None = None\n last_active_at: datetime | None = None #: When the job last sent heartbeat\n finished_at: datetime | None = None #: When the job finished\n result: str | None = None #: Information with the job result (should be rather small and should summarize the outcome of the scraping)\n\n\nclass Lease(BaseModel):\n \"\"\"Lease metadata\"\"\"\n\n name: str #: Lease name (resource name to be locked)\n owner_id: str #: ID of the acquirer (should be the same if you already have the lease and want to prolong it)\n acquired: datetime #: Time when the lease was acquired\n acquired_until: datetime #: Time until the lease is acquired\n","sub_path":"sneakpeek/lib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"439447422","text":"\"\"\"\nWe truthfully declare:\n- to have contributed approximately equally to this assignment [if this is not true, modify this sentence to disclose individual contributions so we can grade accordingly]\n- that we have neither helped other students nor received help from other students\n- that we provided references for all code that is not our own\n\nName Student 1 email@vu.nl\nName Student 2 email@vu.nl\n\"\"\"\n# replace \"grep\" in the following lines with the correct commands for exercises 1-8\ncommands = {\n1 : \"\"\"grep\"\"\",\n2 : \"\"\"grep\"\"\",\n3 : \"\"\"grep\"\"\",\n4 : \"\"\"grep\"\"\",\n5 : \"\"\"grep\"\"\",\n6 : \"\"\"grep\"\"\",\n7 : \"\"\"grep\"\"\",\n8 : \"\"\"grep\"\"\"}\n\nimport os\nos.chdir(\"C:/Users/Jesse/OneDrive/Bureaublad laptop Jesse/Pre-master/Project big data/W1/data-week-1\")\n\ndef ex9(): # for exercise 9\n None\n\n#First, move to directory where the data map is located\noutfile = 'data/outfile.txt'\ntextfile1 = 'data/textfile1.txt'\ntextfile2 = 'data/textfile2.txt'\ndef ex10(textfile1,textfile2,outfile): \n with open(textfile1) as f1:\n linesf1 = [line.rstrip('\\n') for line in f1]\n \n with open(textfile2) as f2:\n linesf2 = [line.rstrip('\\n') for line in f2]\n \n result = [linesf1[i] for i in range(0,len(linesf1)) if linesf1[i] not in linesf2]\n \n outfile = open(outfile, 'w')\n \n outfile.write(\"\\n\".join(sorted(result)))\n \n outfile.close()\n \n \ndef ex11(filename):\n None\n\n#First, move to directory where the data map is located\ntextfile='data/draughts2.txt'\ndef ex12(textfile):\n \n def readCoordinate(c):\n clist = []\n x = c[1:2]\n y = c[3:4]\n clist.append(x)\n clist.append(y)\n return clist\n \n def printLine(l):\n for i in l:\n print(i, end = '')\n \n def printFirstLineOfBoard(): \n Start=[\" \",\"_\",\"_\",\"_\",\"_\"]\n Mid = [\"_\",\"_\",\"_\",\"_\"]\n End = [\"_\",\"_\",\"_\",\" \"]\n \n startLine = Start + 8* Mid + End\n \n printLine(startLine)\n print(\"\")\n \n def printBoard(clist,plist):\n board = [] \n for k in range(0,30):\n boardLineStartAndMid = [\"|\",\" \",\" \",\" \"] \n boardLineEnd = [\"|\",\" \",\" \",\" \",\"|\"]\n boardLine1 = boardLineStartAndMid * 9 + boardLineEnd\n boardLine2 = boardLineStartAndMid * 9 + boardLineEnd\n boardEndlineEnd = [\"|\",\"_\",\"_\",\"_\",\"|\"]\n boardEndline = [\"|\",\"_\",\"_\",\"_\"]\n boardEndLine = boardEndline * 9 + boardEndlineEnd\n \n board.append(boardLine1)\n board.append(boardLine2)\n board.append(boardEndLine)\n \n # A 
dictionary to translate coordinates from the file to the place\n # on the correct position in the board variable, the beginline is excluded as its seperatly printed \n # for example: (1,1) is located at board[28,2], (1,2) at board[25,2] and (2,1) at board[28,6]\n # note: x and y at board are mirrored! so board[y,x]\n xCoordinatedict = {1:2,2:6,3:10,4:14,5:18,6:22,7:26,8:30,9:34,10:38}\n yCoordinatedict = {1:28,2:25,3:22,4:19,5:16,6:13,7:10,8:7,9:4,10:1} \n \n for i in range(0,len(clist)):\n x = int(clist[i][0])\n y = int(clist[i][1])\n if x % y == 0:\n boardX = xCoordinatedict.get(x)\n boardY = yCoordinatedict.get(y)\n board[boardY][boardX] = str(plist[i]) \n else: next\n \n printFirstLineOfBoard()\n \n for j in range(0,30): \n printLine(board[j]) \n print(\"\")\n \n #main of ex12\n import re as re\n #read line of file\n with open(textfile) as f:\n flines = f.readlines()\n \n coordinateList = []\n pieceList = []\n #is it in the desired format?\n for line in flines:\n match = re.search(r'(^[\\(\\S][0-9]+,[0-9]+\\))(\\t)(.)',line)\n if match:\n coordinateList.append(readCoordinate(match.group(1)))\n pieceList.append(match.group(3))\n else:next \n \n #A board with size 10x10 is assumed\n printBoard(coordinateList,pieceList)\n \n\n \n \n","sub_path":"Assignment1/solution Jesse.py","file_name":"solution Jesse.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"624039349","text":"from django.db import models\nimport copy\nfrom base.lib import BaseRouter\nfrom django.contrib.postgres.fields import JSONField\nimport random\n# Create your models here.\n\nclass TrafficDensity(models.Model):\n time = models.DateTimeField(auto_now_add=True)\n bounds = models.CharField(max_length = 200)\n\nclass RandomMixin(object):\n class Meta:\n abstract: True\n \n @classmethod\n def get_random(cls):\n queryset = cls.objects.all()\n return queryset[random.randint(0, queryset.count()-1)]\n\nclass LocationMixin(models.Model):\n class Meta:\n abstract = True\n\n lat = models.FloatField(max_length = 15)\n lng = models.FloatField(max_length = 15)\n\n ref = None\n increment = None\n num_rows = None\n num_cols = None\n\n\n @classmethod \n def populate_given_points(cls, points):\n \"\"\"expects a list of tuples\"\"\"\n nodes = []\n for p in points:\n nodes.append(cls(lat = round(p[0],8), lng = round(p[1],8)))\n \n cls.objects.bulk_create(nodes)\n\n @classmethod\n def populate_test_points(cls):\n \n ref = cls.ref\n increment = cls.increment\n\n if None in [ref, increment, cls.num_cols, cls.num_rows]:\n raise NameError(\"ref and increment both need to be set\")\n\n start_point = [ref[0], ref[1]]\n cursor = copy.copy(start_point)\n nodelist = []\n for i in range(cls.num_rows):\n for j in range(cls.num_cols):\n node = cls(lat = round(cursor[0],8), lng = round(cursor[1],8))\n\n nodelist.append(node)\n cursor[1] += increment[1]\n #reset the longitude and increment the lattitude\n cursor[1] = start_point[1]\n cursor[0] += increment[0]\n\n cls.objects.bulk_create(nodelist)\n\n\nclass Destination(LocationMixin, RandomMixin, models.Model):\n \"\"\"class for general destinations, used to baseline against driving directly to the destination\"\"\"\n destinations = [\n (49.277614, -123.107193), #Rogers Arena\n (49.276329, -123.109085), #BC Place\n (49.278449, -123.137253), #Sunset Beach\n (49.287744, -123.114120), #Canada Place\n (49.282620, -123.121577), #Robson Square\n (49.284573, -123.124854), #Robson & Thurlow\n ]\n\n # ref = 
[49.278335, -123.118608]\n # increment = [0.002115,-0.002115]\n # num_rows = 5\n # num_cols = 5\n\n\nclass TestOwner(LocationMixin, RandomMixin, models.Model):\n \"\"\"class for test owners. these locations are desireable destinations such as downtown/north shore\"\"\"\n # stations = [\n # (49.286162, -123.111596),\n # (49.282690, -123.118579),\n # (49.274781, -123.121874),\n # (49.266661, -123.115711),\n # (49.263022, -123.114505),\n # (49.250057, -123.116117),\n # (49.233281, -123.116616),\n # (49.226482, -123.116099),\n # (49.209805, -123.116918),\n # ]\n closest_train_route = JSONField(null=True)\n closest_train_name = models.CharField(max_length = 50, null = True, default = None)\n\n \"\"\"Store walking route to closest train in JSON\"\"\"\n def build_closest_train_route(self):\n if(self.closest_train_route is None):\n r = BaseRouter()\n print(r.api_calls)\n station = r.get_closest(SkyTrainStation, self, limit=1)[0]\n #try the api call\n try:\n route = r.get_directions(self, station, BaseRouter.ModeOptions.walking)\n except Exception as e:\n print(\"The following error on owner{id}.\".format(id=self.id))\n print(str(e))\n print(\"quitting...\")\n return\n else: \n self.closest_train_route = route\n self.closest_train_name = station.name\n #try to save \n try:\n self.save()\n except:\n print(\"Error saving google maps response\")\n else:\n print(\"API Calls: \", r.api_calls)\n print(\"station name:\", station)\n else:\n print(\"owner with id {id} already build\".format(id=self.id))\n\n \n \n\nclass TestDriver(LocationMixin, RandomMixin, models.Model):\n \"\"\"Class for test drivers. These locations are spread throughout the city\"\"\"\n #point 12\n ref = [49.213317, -123.094433]\n increment = [0.011423, -0.011423]\n num_rows = 4\n num_cols = 1\n\n test_complete = models.BooleanField(default = False)\n end_dest_is_personal_spot = models.BooleanField(default = False) #get the user to click this if their end destination is to a personal spot. Do not add parking time in this case\n\n\nclass Route(models.Model):\n \"\"\"Represents a single route all the way from A -> B \n UNUSED FOR NOW\n \"\"\"\n driver = models.ForeignKey(TestDriver, models.CASCADE)\n departure_datetime = models.DateTimeField(auto_now_add = True)\n arrival_datetime = models.DateTimeField() #Used for when they arrive at their destination (will allow us to check our estimate accuracy)\n distance = models.FloatField() #total distance travelled in km\n predicted_time = models.IntegerField() #total time\n actual_time = models.IntegerField()\n route = JSONField(default = None)\n\nclass RouteSegment(models.Model):\n \"\"\"Represents one segment of one route. i.e. 'walked' for '5 mins'\"\"\"\n route = models.ForeignKey(Route, models.CASCADE)\n method = models.CharField(max_length = 30) #i.e. driving, walking, train, etc.\n distance = models.FloatField()\n predicted_time = models.IntegerField() #i.e. 
5 minutes\n actual_time = models.IntegerField()\n route_segment = JSONField(default = None)\n\nclass SkyTrainStation(LocationMixin, models.Model):\n stations = [\n (49.285949, -123.111583), #Waterfront\n (49.282470, -123.118614), #Vancouver City Center\n (49.274578, -123.121870), #Yaletown-Roundhouse\n (49.266531, -123.115736), #Olympic Village\n (49.262836, -123.114511), #Broadway City Hall\n (49.249180, -123.115846), #King Edward Station\n (49.233140, -123.116660), #Oakridge 41st ave\n (49.226302, -123.116108), #Langara 49th ave\n (49.209595, -123.116902), #Marine Drive Station\n (49.195524, -123.126042), #Bridgeport Station\n (49.183972, -123.136329), #Aberdeen Station\n (49.174779, -123.136575), #Lansdowne\n (49.168142, -123.136302), #Ridgemont-Brighouse\n (49.196672, -123.146402), #Templeton (Airport Route)\n (49.193040, -123.158049), #Sea Island Center (Airport Route)\n (49.194275, -123.178435), #YVR Airport Station (Airport Route)\n ]\n names = [\n 'Waterfront',\n 'Vancouver City Center',\n 'Yaletown-Roundhouse',\n 'Olympic Village',\n 'Broadway City Hall',\n 'King Edward',\n 'Oakridge 41st Ave',\n 'Langara 49th Ave',\n 'Marine Drive',\n 'Bridgeport',\n 'Aberdeen',\n 'Lansdowne',\n 'Ridgemont-Brighouse',\n 'Templeton',\n 'Sea Island Center',\n 'YVR Airport',\n ]\n name = models.CharField(max_length = 50, null=True, default = None)\n \n def __str__(self):\n return \"Nearest to: {stn}\".format(stn = self.name)\n\n @classmethod\n def populate_canada_line(cls):\n ar = []\n index=0\n for s in cls.stations:\n ar.append(cls(lat=s[0], lng=s[1], name = cls.names[index]))\n index += 1\n\n cls.objects.bulk_create(ar)\n \n\nclass Polylines(models.Model):\n #drive-walk\n drive_walk_driveline = models.CharField(max_length = 1000, default = None)\n drive_walk_walkline = models.CharField(max_length = 1000, default = None)\n #drive-train\n drive_transit_driveline = models.CharField(max_length = 1000, default = None)\n drive_transit_walkline = models.CharField(max_length = 1000, default = None)\n drive_transit_trainline = models.CharField(max_length = 1000, default = None)\n #drive\n drive_driveline = models.CharField(max_length = 1000, default = None)\n #transit\n transit_transitline = models.CharField(max_length = 1000, default = None)\n #walk\n walk_walkline = models.CharField(max_length = 1000, default = None)\n\n \nclass Entry(models.Model):\n departure_dayname = models.CharField(max_length = 10)\n departure_datetime = models.DateTimeField(auto_now_add = True)\n models.DateTimeField()\n start_point_long = models.DecimalField(max_digits = 12, decimal_places=8)\n start_point_lat = models.DecimalField(max_digits = 12, decimal_places=8)\n end_point_long = models.DecimalField(max_digits= 12, decimal_places=8)\n end_point_lat = models.DecimalField(max_digits=12, decimal_places=8)\n\n driver = models.ForeignKey(TestDriver, on_delete = models.DO_NOTHING, default = None)\n destination = models.ForeignKey(Destination, on_delete = models.DO_NOTHING, default = None)\n polylines = models.ForeignKey(Polylines, on_delete = models.CASCADE, default = None)\n\n #Drive-Walk\n drive_walk_drivetime = models.IntegerField()\n drive_walk_drivetime_traffic = models.IntegerField(default = None, null=True)#######\n drive_walk_walktime = models.IntegerField()\n #Drive-Train\n drive_transit_drivetime = models.IntegerField()\n drive_transit_drivetime_traffic = models.IntegerField(default = None, null= True)######\n drive_transit_walktime = models.IntegerField()\n drive_transit_traintime = models.IntegerField()\n #Drive\n 
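 # (Assumption from the field pattern, not stated in the source: the
 # *_traffic variants marked with ####### mirror their plain counterparts
 # but hold a traffic-adjusted estimate, hence null=True, default=None
 # until one is recorded.)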
drive_drivetime = models.IntegerField()\n drive_drivetime_traffic = models.IntegerField(default = None, null=True)#######\n drive_parktime = models.IntegerField(default = None)\n #Transit\n transit_transittime = models.IntegerField()\n #Walk\n walk_walktime = models.IntegerField()","sub_path":"fuzzy-friend/base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"328315862","text":"#!/usr/bin/env python3\n\"\"\"\nScript that builds a number of python frameworks as\nused by the run_tests.py script\n\nFIXME:\n- Both variants need to be build with simular options\n to the official builds: 32-bit with SDK 10.4u and deployment\n target 10.3, 3-way without SDK and depl. target 10.5.\n\n This will have to wait until my sdkroot patches get committed,\n without that patch I cannot build the 32-bit variant on \n SL.\n- get rid of the global variables\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\n\nimport subprocess, getopt, logging, os, shutil\nfrom urllib.request import urlopen\nimport pprint\n\n\ngUsage=\"\"\"\\\nbuild_frameworks.py [-v versions] [--versions=versions] [-a archs] [--arch=archs] [-f flavours] [--flavours=flavours]\n\n- versions: comma seperated list of python versiosn, defaults to \"2.6,2.7,3.1,3.2\"\n- archs: comma seperated list of build architectures, defaults to \"32-bit,3-way\"\n- flavours: comma separated list of build variants, defaults to \"release,debug\"\n\"\"\"\n\ngBaseDir = os.path.dirname(os.path.abspath(__file__))\n\ngArchs = (\"32-bit\", \"3-way\", \"intel\")\n\n\n# Name of the Python framework and any additional arguments\n# passed to the configure command.\ngFlavours = [\n dict(\n name=\"debug\",\n template=\"DbgPython-{archs}\",\n flags=[\n \"--with-pydebug\",\n ],\n ),\n dict(\n name=\"release\",\n template=\"ReleasePython-{archs}\",\n flags=[\n ],\n ),\n]\n\n\n# Location of the SVN branches to be used\ngURLMap = {\n '2.6': 'http://svn.python.org/projects/python/branches/release26-maint',\n '2.7': 'http://svn.python.org/projects/python/branches/release27-maint',\n\n '3.1': 'http://svn.python.org/projects/python/branches/release31-maint',\n '3.2': 'http://svn.python.org/projects/python/branches/py3k',\n}\n\n\n# Name of the OSX SDK used to build the framework, keyed of the architecture\n# variant.\ngSdkMap={\n '32-bit': '/Developer/SDKs/MacOSX10.4u.sdk',\n '3-way': '/',\n 'intel': '/',\n}\n\n# Name of the OSX Deployment Target used to build the framework, keyed of \n# the architecture variant.\ngDeploymentTargetMap={\n '32-bit': '10.3',\n #'32-bit': '10.5',\n '3-way': '10.5',\n 'intel': '10.6',\n}\n\ngArchMap = {\n '2.6': {'32-bit'},\n '2.7': {'32-bit', 'intel', '3-way'},\n '3.1': {'32-bit'},\n '3.2': {'32-bit', 'intel', '3-way'},\n}\n\nclass ShellError (Exception):\n \"\"\" An error occurred while running a shell command \"\"\"\n pass\n\ndef create_checkout(version):\n \"\"\"\n Create or update the checkout of the given version\n of Python.\n \"\"\"\n lg = logging.getLogger(\"create_checkout\")\n lg.info(\"Create checkout for %s\", version)\n\n checkoutdir = os.path.join(gBaseDir, \"checkouts\", version)\n if not os.path.exists(checkoutdir):\n lg.debug(\"Create directory %r\", checkoutdir)\n os.makedirs(checkoutdir)\n\n if os.path.exists(os.path.join(checkoutdir, '.svn')):\n lg.debug(\"Update checkout\")\n p = subprocess.Popen([\n 'svn', 'up'],\n cwd=checkoutdir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n env=os.environ)\n 
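 # The branch above updates an existing working copy in place; the
 # else-branch below performs the initial 'svn co' of gURLMap[version]
 # into checkoutdir. Both funnel into the same communicate()/wait()
 # error handling further down.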
else:\n lg.debug(\"Initial checkout checkout\")\n p = subprocess.Popen([\n 'svn', 'co', gURLMap[version], checkoutdir], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n env=os.environ)\n\n data = p.communicate()[0]\n xit = p.wait()\n if xit == 0:\n lg.info(\"Checkout for %s is now up-to-date\", version)\n else:\n print(data.decode('utf-8').rstrip())\n lg.warn(\"Checkout for %s failed\", version)\n raise ShellError(xit)\n\ndef build_framework(flavour, version, archs):\n \"\"\"\n Build the given version of Python in the given architecture\n variant. \n\n This also installs distribute and virtualenv (the latter using\n a local copy of the package).\n \"\"\"\n lg = logging.getLogger(\"build_framework\")\n lg.info(\"Build %s framework version=%r archs=%r\", flavour[\"name\"], version, archs)\n\n builddir = os.path.join(gBaseDir, \"checkouts\", version, \"build\")\n if os.path.exists(builddir):\n lg.debug(\"Remove existing build tree\")\n shutil.rmtree(builddir)\n\n lg.debug(\"Create build tree %r\", builddir)\n os.mkdir(builddir)\n\n lg.debug(\"Running 'configure'\")\n p = subprocess.Popen([\n \"../configure\",\n \"--enable-framework\",\n \"--with-framework-name={0}\".format(flavour[\"template\"].format(version=version, archs=archs)),\n \"--enable-universalsdk={0}\".format(gSdkMap[archs]),\n \"--with-universal-archs={0}\".format(archs),\n ] + flavour[\"flags\"] + [\n \"MACOSX_DEPLOYMENT_TARGET={0}\".format(gDeploymentTargetMap[archs]),\n ], cwd=builddir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)\n\n data = p.communicate()[0]\n\n xit = p.wait()\n if xit != 0:\n print(data.decode('utf-8').rstrip())\n lg.debug(\"Configure failed for %s\", version)\n raise ShellError(xit)\n \n lg.debug(\"Running 'make'\")\n p = subprocess.Popen([\n \"make\",\n ], cwd=builddir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)\n data = p.communicate()[0]\n\n xit = p.wait()\n if xit != 0:\n print(data.decode('utf-8').rstrip())\n lg.debug(\"Make failed for %s\", version)\n raise ShellError(xit)\n\n lg.debug(\"Running 'make install'\")\n p = subprocess.Popen([\n \"make\",\n \"install\",\n ], cwd=builddir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)\n data = p.communicate()[0]\n\n xit = p.wait()\n if xit != 0:\n print(data.decode('utf-8').rstrip())\n lg.debug(\"Install failed for %r\", version)\n raise ShellError(xit)\n\ndef install_distribute(flavour, version, archs):\n lg = logging.getLogger(\"install_distribute\")\n lg.debug(\"Installing distribute\")\n\n distribute_dir = os.path.join(gBaseDir, \"distribute-0.6.12-patched\")\n distribute_dir = os.path.join(gBaseDir, \"distribute-0.6.14\")\n builddir = os.path.join(distribute_dir, \"build\")\n if os.path.exists(builddir):\n lg.debug(\"Remove existing 'build' subdir\")\n shutil.rmtree(builddir)\n\n frameworkName=flavour[\"template\"].format(archs=archs, version=version)\n\n python = \"/Library/Frameworks/{0}.framework/Versions/{1}/bin/python\".format(\n frameworkName, version)\n if version[0] == '3':\n python += '3'\n\n\n if os.path.exists(os.path.join(distribute_dir, 'build')):\n shutil.rmtree(os.path.join(distribute_dir, 'build'))\n\n lg.debug(\"Run setup script with '%s'\", python)\n p = subprocess.Popen([\n python, \"setup.py\", \"install\"],\n cwd=distribute_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)\n \n data = p.communicate()[0]\n\n xit = p.wait()\n if xit != 0:\n print(data.decode('utf-8').rstrip())\n lg.warning(\"Installing 'distribute' failed\")\n raise 
ShellError(xit)\n\n\ndef install_virtualenv(flavour, version, archs):\n lg = logging.getLogger(\"install_virtualenv\")\n\n lg.info(\"Installing virtualenv from local source\")\n\n frameworkName=flavour[\"template\"].format(archs=archs, version=version)\n\n python = \"/Library/Frameworks/{0}.framework/Versions/{1}/bin/python\".format(\n frameworkName, version)\n if version[0] == '3':\n python += '3'\n\n # Sadly enough plain virtualenv doens't support \n # python3 yet, but there is a fork that does.\n # Therefore install the real virtualenv for python 2.x\n # and the fork for python 3.x\n if version[0] == '2':\n srcdir = os.path.join(gBaseDir, 'virtualenv-src')\n else:\n srcdir = os.path.join(gBaseDir, 'virtualenv3-src')\n\n if os.path.exists(os.path.join(srcdir, 'build')):\n shutil.rmtree(os.path.join(srcdir, 'build'))\n\n p = subprocess.Popen([ python, \"setup.py\", \"install\" ],\n cwd=srcdir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)\n\n data = p.communicate()[0]\n\n xit = p.wait()\n if xit != 0:\n print(data.decode('utf-8').rstrip())\n lg.warning(\"Installing 'virtualenv' failed\")\n raise ShellError(xit)\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'v:a:h?f:', ['help', 'versions=', 'archs=', 'flavours=', 'flavors='])\n except getopt.error as msg:\n print(msg, file=sys.stderr)\n print(gUsage, file=sys.stderr)\n sys.exit(1)\n\n versions = sorted(gURLMap.keys())\n archs = gArchs\n flavours = gFlavours\n\n if args:\n print(\"Additional arguments\", file=sys.stderr)\n print(gUsage, file=sys.stderr)\n sys.exit(1)\n\n\n\n for k, v in opts:\n if k in ('-h', '-?', '--help'):\n print(gUsage)\n sys.exit(0)\n\n elif k in ('-v', '--versions'):\n versions = v.split(',')\n\n for v in versions:\n if v not in gURLMap:\n print(\"Unsupported python version: {0}\".format(v), \n file=sys.stderr)\n sys.exit(1)\n\n elif k in ('-a', '--archs'):\n archs = v.split(',')\n\n for v in archs:\n if v not in gArchs:\n print(\"Unsupported python architecture: {0}\".format(v), \n file=sys.stderr)\n sys.exit(1)\n\n elif k in ('-f', '--flavours', '--flavors'):\n flavours = [v.strip() for v in v.split(',')]\n for v in flavours:\n if v not in gFlavours:\n print(\"Unsupported python flavour: {0}\".format(v), \n file=sys.stderr)\n sys.exit(1)\n\n else:\n print(\"ERROR: unhandled script option: {0}\".format(k), \n file=sys.stderr)\n sys.exit(2)\n\n lg = logging.getLogger(\"build_frameworks\")\n lg.info(\"Building versions: %s\", versions)\n lg.info(\"Building architectures: %s\", archs)\n try:\n for version in sorted(versions):\n create_checkout(version)\n\n for flavour in flavours:\n for arch in sorted(archs):\n if arch not in gArchMap[version]:\n lg.info('Skip %s framework for python %s (%s)', flavour[\"name\"], version, arch)\n continue\n\n try:\n lg.info('Building %s framework for python %s (%s)', flavour[\"name\"], version, arch)\n build_framework(flavour, version, arch)\n lg.info('Installing distribute for python %s (%s)', version, arch)\n install_distribute(flavour, version, arch)\n lg.info('Installing virtualenv for python %s (%s)', version, arch)\n install_virtualenv(flavour, version, arch)\n lg.info('Done %s python %s (%s)', flavour[\"name\"], version, arch)\n except Exception as exc:\n lg.warning(\"building %s for pyton %s (%s) failed: %s\",\n flavour[\"name\"], version, arch, exc)\n import traceback\n traceback.print_exc()\n \n except ShellError:\n sys.exit(1)\n\nif __name__ == \"__main__\":\n 
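 # Illustrative invocation per gUsage at the top of this script (the flag
 # values are examples, not the only supported ones):
 #   python build_frameworks.py --versions=2.7,3.2 --archs=intel
 # This checks out each requested branch, builds the framework variants,
 # then installs distribute and virtualenv into each build.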
main()\n","sub_path":"Prototyping/scripts/trunk/pyobjc/build-support/build_frameworks.py","file_name":"build_frameworks.py","file_ext":"py","file_size_in_byte":11380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"39075275","text":"#Constants list\n\n#Directory from which app.py is launched\n__ROOT_DIR__= \"D:/development/python/BJ\"\n__RESOURCE_DIR__ = __ROOT_DIR__ + \"/resource\"\n\n#app settings file\n__APP_CONF__ = __ROOT_DIR__ + \"/settings.conf\"\n__APP_SECTION__ = \"app\"\n\n#DBSingleton\n__DBSINGLETON_CONF__ = __APP_CONF__\n__DBSINGLETON_SECTION__ = \"DBSingleton\"\n\n#HexGrid\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"486348237","text":"'''Text classification using Naive Bayes'''\nfrom __future__ import with_statement\nfrom __future__ import print_function\n\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys, os, codecs\nfrom time import time\n\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import metrics\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nfrom classifiers import NaiveBayes\nfrom classifiers import GaussianNaiveBayes\nfrom preprocess import readData\nfrom postprocess import writeResults\nfrom cross_validation import CrossValidate\n\n################################################################################\n# logging and options\n################################################################################\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\n\nop = OptionParser()\nop.add_option(\"--report\",\n action=\"store_true\", dest=\"print_report\",\n help=\"Print a detailed classification report.\")\nop.add_option(\"--chi2_select\",\n action=\"store\", type=\"int\", dest=\"select_chi2\",\n help=\"Select some number of features using a chi-squared test\")\nop.add_option(\"--confusion_matrix\",\n action=\"store_true\", dest=\"print_cm\",\n help=\"Print the confusion matrix.\")\nop.add_option(\"--top10\",\n action=\"store_true\", dest=\"print_top10\",\n help=\"Print ten most discriminative terms per class\"\n \" for every classifier.\")\nop.add_option(\"--max_n_gram\",\n action=\"store\", type=\"int\", dest=\"max_n_gram_length\",\n help=\"The maximum n-gram size to be used.\")\nop.add_option(\"--use_tf_idf\",\n action=\"store_true\",\n help=\"If set, tf-idf term weighting will be used.\")\nop.add_option(\"--lowercase\",\n action=\"store_true\",\n help=\"If set, the documents will be converted to lowercase.\")\nop.add_option(\"--lemmatize\",\n action=\"store_true\",\n help=\"If set, all words will be lemmatized.\")\nop.add_option(\"--remove_stop_words\",\n action=\"store_true\",\n help=\"If set, sklearn's list of English stop words will be removed.\")\nop.add_option(\"--test\",\n action=\"store\", type=\"float\", dest=\"test_fraction\",\n help=\"Run on a fraction of the entire training corpus\")\nop.add_option(\"--use_hashing\",\n action=\"store_true\",\n help=\"Use a hashing vectorizer.\")\nop.add_option(\"--n_features\",\n 
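 # (2**16 hash buckets by default; this value is handed to
 # HashingVectorizer(n_features=...) in getXTrain below)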
action=\"store\", type=int, default=2 ** 16,\n help=\"n_features when using the hashing vectorizer.\")\nop.add_option(\"--gaussian\",\n action=\"store_true\",\n help=\"If set, features will be treated as continuous random variables with Gaussian distributions\")\nop.add_option(\"--use_scikit\",\n action=\"store_true\",\n help=\"If set, use scikit's Gaussin naive bayes implementation\")\nop.add_option(\"--predict\",\n action=\"store_true\",\n help=\"If set, predictions will be made for the unknown test data\")\nop.add_option(\"--cv_range\",\n action=\"store\", type=int, nargs=3, dest=\"cv_range\",\n help=\"Three positive integers separated by spaces where the first and second are equal to the start and end of the range, inclusive, and the middle is equal to the step size\")\nop.add_option(\"--devset\",\n action=\"store_true\",\n help=\"If set, accuracy will be measured against a 30 percent dev set. Cannot be used in tandem with --cv_range.\")\n\n(opts, args) = op.parse_args()\nif len(args) > 0:\n op.error(\"This script takes no arguments.\")\n sys.exit(1)\n\nif opts.max_n_gram_length:\n if opts.max_n_gram_length < 1:\n op.error(\"Max n-gram length must be positive\")\n sys.exit()\n\nif opts.test_fraction:\n if opts.test_fraction > 1.0 or opts.test_fraction < 0.0:\n op.error(\"The test fraction must be between 0.0 and 1.0\")\n sys.exit(1)\n\nif opts.cv_range:\n start, end, step = opts.cv_range\n if start < 0 or start > end or step < 1:\n op.error(\"Invalid range\")\n sys.exit(1)\n\nif opts.cv_range and opts.devset:\n op.error(\"Can only use one of cross validation or a develpoment set\")\n sys.exit(1)\n\nprint(__doc__)\nop.print_help()\nprint()\n\nclass LemmaTokenizer(object):\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n\n def __call__(self, doc):\n return [self.wnl.lemmatize(word) for word in word_tokenize(doc)]\n\n################################################################################\n# Helpers\n################################################################################\n\ndef size_mb(docs):\n return sum(len(s.encode('utf-8')) for s in docs) / 1e6\n\ndef selectChi2(X_train, y_train, X_test, k, feature_names=None):\n print(\"Extracting %d best features by a chi-squared test\" % k)\n t0 = time()\n # the SelectKBest object is essentially a vectorizer that will select only the most influential k features of your input vectors\n ch2 = SelectKBest(chi2, k=k)\n X_train = ch2.fit_transform(X_train, y_train)\n X_test = ch2.transform(X_test) # revectorize X_test\n if feature_names:\n # keep selected feature names\n feature_names = [feature_names[i] for i\n in ch2.get_support(indices=True)]\n print(\"done in %fs\" % (time() - t0))\n print(\"n_samples: %d, n_features: %d\" % X_test.shape)\n print()\n return X_train, X_test, feature_names\n\ndef selectChi2Cv(X_train, y_train, k):\n print(\"Extracting %d best features by a chi-squared test\" % k)\n t0 = time()\n # the SelectKBest object is essentially a vectorizer that will select only the most influential k features of your input vectors\n ch2 = SelectKBest(chi2, k=k)\n X_train = ch2.fit_transform(X_train, y_train)\n print(\"done in %fs\" % (time() - t0))\n print(\"n_samples: %d, n_features: %d\" % X_train.shape)\n print()\n return X_train\n\ndef crossValidate(X_train, y_train, clf, rng):\n '''Return a an array of tuples: (# features used, avg prediction accuracy)'''\n arr = []\n if type(clf) == \"GuassianNB\":\n accuracyFunc = metrics.accuracy_score\n else:\n accuracyFunc = clf.getAccuracy\n for numFeats in rng:\n X_t = 
selectChi2Cv(X_train, y_train, numFeats)\n crossValidator = CrossValidate(X_t, y_train, clf, accuracyFunc)\n acc = crossValidator.crossValidate()\n arr.append((numFeats, acc))\n return arr\n\ndef makePredictions(X_train, y_train, X_test, clf):\n print('_' * 80)\n print(\"Training: \")\n print(clf)\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n print()\n t0 = time()\n pred = clf.predict(X_test)\n test_time = time() - t0\n print(\"test time: %0.3fs\" % test_time)\n print()\n return pred\n\ndef getXTrain(docs_train):\n maxNGramLength = 1\n if opts.max_n_gram_length:\n maxNGramLength = opts.max_n_gram_length\n print(\"Using n-grams of up to %d words in length\" % maxNGramLength)\n\n if opts.lowercase:\n lowercase = True\n print(\"Converting all text to lowercase\")\n else:\n lowercase = False\n\n if opts.lemmatize:\n tokenizer = LemmaTokenizer()\n print(\"Lemmatizing all words\")\n else:\n tokenizer = None\n\n if opts.remove_stop_words:\n stop_words = 'english'\n print(\"Using stop words\")\n else:\n stop_words = None\n\n print(\"Extracting features from the training data using a sparse vectorizer\")\n t0 = time()\n if opts.use_hashing:\n vectorizer = HashingVectorizer(lowercase=lowercase, tokenizer=tokenizer, stop_words=stop_words,\n ngram_range=(1,maxNGramLength), non_negative=True, n_features=opts.n_features)\n X_train = vectorizer.transform(docs_train)\n elif opts.use_tf_idf:\n # a way to re-weight the count features such that extremely common words such\n # as \"the\" and \"a\" are less important than the less common ones\n print(\"Extracting features from the test data using a tfidf vectorizer\")\n vectorizer = TfidfVectorizer(lowercase=lowercase, tokenizer=tokenizer, stop_words=stop_words,\n ngram_range=(1,maxNGramLength))\n X_train = vectorizer.fit_transform(docs_train)\n else:\n print(\"Extracting features from the test data using a count vectorizer\")\n vectorizer = CountVectorizer(lowercase=lowercase, tokenizer=tokenizer, stop_words=stop_words,\n ngram_range=(1,maxNGramLength))\n X_train = vectorizer.fit_transform(docs_train)\n duration = time() - t0\n print(\"done in %fs at %0.3fMB/s\" % (duration, data_train_size_mb / duration))\n print(\"n_samples: %d, n_features: %d\" % X_train.shape)\n print()\n if opts.use_scikit:\n X_train = X_train.toarray()\n return X_train, vectorizer\n\ndef getXTest(docs_test, vectorizer):\n print(\"Extracting features from the development data using the same vectorizer\")\n t0 = time()\n X_test = vectorizer.transform(docs_test)\n duration = time() - t0\n print(\"done in %fs at %0.3fMB/s\" % (duration, data_test_size_mb / duration))\n print(\"n_samples: %d, n_features: %d\" % X_test.shape)\n print()\n if opts.use_scikit:\n X_test = X_test.toarray() # required for GuassianNB implementation\n return X_test\n\ndef benchmark(X_train, y_train, X_dev, y_dev):\n if opts.use_scikit:\n X_train = X_train.toarray()\n X_dev = X_dev.toarray()\n print('_' * 80)\n print(\"Training: \")\n print(clf)\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n print()\n\n t0 = time()\n pred = clf.predict(X_dev)\n test_time = time() - t0\n print(\"test time: %0.3fs\" % test_time)\n print()\n\n # get the accuracy of the predictions against the train data\n score = metrics.accuracy_score(y_dev, pred)\n print(\"accuracy: %0.3f\" % score)\n print()\n else:\n print(\"Training on training set\")\n t0 = time()\n clf.fit(X_train, y_train)\n dur = time() - 
t0\n print(\"completed training in %fs\" % dur)\n print()\n print(\"Predicting on development set\")\n t0 = time()\n pred = clf.predict(X_dev)\n dur = time() - t0\n print(\"completed predictions in %fs\" % dur)\n print()\n accuracy = clf.getAccuracy(y_dev, pred)\n print(\"Accuracy:\")\n print(accuracy)\n print()\n\ndef printAccuracies(accs):\n print(\"# features\\taccuracy\")\n for (numFeats, acc) in accs:\n print(\"%d\\t%f\" % (numFeats, acc))\n\ndef printVector(x):\n x = x.toarray()\n countPositive = 0\n countOverOne = 0\n for v in x[0]:\n if v > 0.0:\n countPositive += 1\n if v >= 1.0:\n countOverOne += 1\n print(countPositive)\n print(countOverOne)\n\nif __name__ == \"__main__\":\n\n ################################################################################\n # data loading\n ################################################################################\n\n all_docs_train, all_y_train, docs_test = readData()\n print(\"data loaded\")\n\n # If we train on only a fraction of the data, we will need to know this\n training_on_data_fraction = False\n\n ################################################################################\n # optionally extract only a portion of data for training\n ################################################################################\n\n if opts.test_fraction:\n training_on_data_fraction = True\n percent = (opts.test_fraction * 100.0)\n print(\"Using only %.f percent of the training data\" % percent)\n threshold = int(opts.test_fraction * len(all_docs_train))\n if threshold == 0:\n print(\"Fraction too small, please choose a larger fraction\")\n print()\n sys.exit(1)\n docs_train = all_docs_train[:threshold]\n y_train = all_y_train[:threshold]\n else:\n docs_train = all_docs_train\n y_train = all_y_train\n print(\"Train set size: %d documents\" % len(docs_train))\n print(\"Test set size: %d documents\" % len(docs_test))\n print(\"done\")\n print()\n\n data_train_size_mb = size_mb(docs_train)\n data_test_size_mb = size_mb(docs_test)\n print(\"%d abstracts - %0.3fMB (training set)\" % (\n len(docs_train), data_train_size_mb))\n print(\"%d abtracts - %0.3fMB (test set)\" % (\n len(docs_test), data_test_size_mb))\n print()\n\n # define the categories\n categories = [\n 'stats',\n 'math',\n 'physics',\n 'cs'\n ]\n\n ################################################################################\n # extract a development set\n ################################################################################\n if opts.devset:\n # if we are not cross validating, we may still select a development set to evaluate performance (albeit more crudely)\n training_on_data_fraction = True\n print(\"Extracting development set from training set\")\n docs_train, docs_dev, y_train, y_dev = train_test_split(docs_train, y_train, test_size=0.3, random_state=0)\n print(\"Using %d training examples and %d testing examples\" % (len(docs_train), len(docs_dev)))\n print(\"done\")\n print()\n\n data_dev_size_mb = size_mb(docs_dev)\n print(\"%d abstracts - %0.3fMB (development set)\" % (\n len(docs_dev), data_dev_size_mb))\n print()\n\n ################################################################################\n # vectorize the training data\n ################################################################################\n\n X_train, vectorizer = getXTrain(docs_train)\n\n feature_names = vectorizer.get_feature_names()\n\n if opts.devset:\n X_dev = getXTest(docs_dev, vectorizer)\n\n ################################################################################\n # 
classification and prediction\n ################################################################################\n\n if opts.gaussian:\n clf = GaussianNaiveBayes()\n elif opts.use_scikit:\n clf = GaussianNB()\n else:\n clf = NaiveBayes()\n\n # either cross validate over a range of numbers of features, or determine the\n # performance on the development set for all or just some features (no cross validation)\n if opts.cv_range:\n print(\"Cross validating to find the best number of features in the provided range\")\n # Cross validate over the range of numbers of features\n start, end, step = opts.cv_range\n rng = range(start, end+1, step)\n accuracies = crossValidate(X_train, y_train, clf, rng)\n\n # print out the accuracies\n print(\"Summary of accuracies:\")\n printAccuracies(accuracies)\n print()\n\n bestAcc = 0\n bestNumFeats = 0\n for (numFeats, acc) in accuracies:\n if acc > bestAcc:\n bestAcc = acc\n bestNumFeats = numFeats\n print(\"Best number of features: %d\" % bestNumFeats)\n print()\n numFeatsToPredictOn = bestNumFeats\n elif opts.devset:\n print(\"Gauging model performance against development set\")\n if opts.select_chi2:\n # we didn't specify a range. But we still might want to select only\n # the top k features to make predictions against the development set\n X_train, X_dev, feature_names = selectChi2(X_train, y_train, X_dev, opts.select_chi2, feature_names)\n # covert feature_names to an ndarray\n feature_names = np.asarray(feature_names)\n benchmark(X_train, y_train, X_dev, y_dev)\n numFeatsToPredictOn = opts.select_chi2\n else:\n print(\"Using all features\")\n # use all the features to make predictions against the development set\n benchmark(X_train, y_train, X_dev, y_dev)\n numFeatsToPredictOn = -1\n else:\n # just in case we don't train or validate but still want to predict\n if opts.select_chi2:\n numFeatsToPredictOn = opts.select_chi2\n else:\n numFeatsToPredictOn = -1\n\n if opts.predict:\n print(\"Making predictions!!!\")\n # make predictions for docs_test\n if training_on_data_fraction:\n # revectorize on ALL the data\n X_train, vectorizer = getXTrain(all_docs_train)\n\n X_test = getXTest(docs_test, vectorizer)\n\n if numFeatsToPredictOn != -1:\n print(\"Selecting the best %d features\" % numFeatsToPredictOn)\n X_train, X_test, feature_names = selectChi2(X_train, y_train, X_test, numFeatsToPredictOn, feature_names)\n\n print(\"Making predictions\")\n pred = makePredictions(X_train, y_train, X_test, clf)\n writeResults(pred, \"naive_bayes\")\n\n'''\n if opts.use_scikit:\n X_train = X_train.toarray()\n X_dev = X_dev.toarray()\n print('_' * 80)\n print(\"Training: \")\n print(clf)\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n print()\n\n t0 = time()\n pred = clf.predict(X_dev)\n test_time = time() - t0\n print(\"test time: %0.3fs\" % test_time)\n print()\n\n # get the accuracy of the predictions against the train data\n score = metrics.accuracy_score(y_dev, pred)\n print(\"accuracy: %0.3f\" % score)\n print()\n else:\n print(\"Training on training set\")\n t0 = time()\n clf.train(X_train, y_train)\n dur = time() - t0\n print(\"completed training in %fs\" % dur)\n print()\n print(\"Predicting on development set\")\n t0 = time()\n pred = clf.predict(X_dev)\n dur = time() - t0\n print(\"completed predictions in %fs\" % dur)\n print()\n accuracy = clf.getAccuracy(y_dev, pred)\n print(\"Accuracy:\")\n print(accuracy)\n 
print()\n'''\n","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":18649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"460375864","text":"import os\nimport sys\nimport argparse\nimport ConfigParser\n\nfrom fabric.api import local\n\nfrom contrail_provisioning.common.base import ContrailSetup\nfrom contrail_provisioning.vcenter_plugin.templates import contrail_vcenter_plugin_conf\n\n\nclass VcenterPluginSetup(ContrailSetup):\n def __init__(self, args_str = None):\n super(VcenterPluginSetup, self).__init__()\n self._args = None\n if not args_str:\n args_str = ' '.join(sys.argv[1:])\n\n self.global_defaults = {\n 'vcenter_url': 'https://127.0.0.1/sdk',\n 'api_hostname': '127.0.0.1',\n 'api_port': 8082,\n 'zookeeper_serverlist': '127.0.0.1:2181',\n }\n\n self.parse_args(args_str)\n\n def parse_args(self, args_str):\n '''\n Eg. setup-vcenter-plugin --vcenter_url https://10.84.24.111/sdk \n --api_port 8082 --api_hostname 10.1.5.11 \n '''\n\n parser = self._parse_args(args_str)\n parser.add_argument(\"--vcenter_url\", help = \"URL of vcenter node\")\n parser.add_argument(\"--vcenter_username\", help = \"vcenter login username\")\n parser.add_argument(\"--vcenter_password\", help = \"vcenter login password\")\n parser.add_argument(\"--vcenter_datacenter\", help = \"vcenter datacenter name\")\n parser.add_argument(\"--vcenter_dvswitch\", help = \"vcenter dvswitch name\")\n parser.add_argument(\"--vcenter_ipfabricpg\", help = \"vcenter ipfabric port group\")\n parser.add_argument(\"--api_hostname\", help = \"IP Address of the config node\")\n parser.add_argument(\"--api_port\", help = \"Listen port for api server\", type = int)\n parser.add_argument(\"--zookeeper_serverlist\", help = \"List of zookeeper ip:port\")\n self._args = parser.parse_args(self.remaining_argv)\n\n def fixup_config_files(self):\n self.fixup_contrail_vcenter_plugin()\n\n def fixup_contrail_vcenter_plugin(self):\n vcenter_full_url = \"https://\"+self._args.vcenter_url+\"/sdk\"\n template_vals = {'__contrail_vcenter_url__' : vcenter_full_url,\n '__contrail_vcenter_username__' : self._args.vcenter_username,\n '__contrail_vcenter_password__' : self._args.vcenter_password,\n '__contrail_vcenter_datacenter__' : self._args.vcenter_datacenter,\n '__contrail_vcenter_dvswitch__' : self._args.vcenter_dvswitch,\n '__contrail_vcenter_ipfabricpg__' : self._args.vcenter_ipfabricpg,\n '__contrail_api_hostname__' : self._args.api_hostname,\n '__contrail_zookeeper_serverlist__' : self._args.zookeeper_serverlist,\n '__contrail_api_port__' : self._args.api_port\n }\n self._template_substitute_write(contrail_vcenter_plugin_conf.template,\n template_vals, self._temp_dir_name + '/contrail-vcenter-plugin.conf')\n local(\"sudo mv %s/contrail-vcenter-plugin.conf /etc/contrail/contrail-vcenter-plugin.conf\" %(self._temp_dir_name))\n\n def run_services(self):\n local(\"sudo vcenter-plugin-setup.sh\")\n\n def setup(self):\n self.fixup_contrail_vcenter_plugin() \n self.run_services() \n\n#end class VcenterPluginSetup\ndef main(args_str = None):\n vcenterplugin = VcenterPluginSetup(args_str)\n vcenterplugin.setup()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/provisioning/contrail_provisioning/vcenter_plugin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458543374","text":"def 
get_pivot_position(array,start,end):\n i=end+1\n for j in range(start+1,end+1):\n if array[j]>array[start]:\n i-=1\n array[i],array[j]=array[j],array[i]\n array[i-1],array[start]=array[start],array[i-1]\n return i-1\n\ndef quick_sort(array,start,end):\n if start 10):\n runCount += 1\n durationSum += duration\n\n print(\"On average, it takes %f sec over %d runs.\" % (durationSum / runCount, runCount))\n print(humans)\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"tomTest/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"341262400","text":"# PySide import\nfrom PySide import QtGui, QtCore, __version__\n\n# python imports\nimport os\nimport sys\nimport json\nimport logging\nimport inspect\nfrom os.path import basename, dirname, join\n\nif sys.platform == 'win32':\n from os import startfile\nelse:\n from os import system\n\n def startfile(path):\n return system('open ' + path)\n\n# hfx gui imports\nimport instance\n\n\n__all__ = [\n 'ConvertToHFX',\n 'applyHFXStyle',\n 'Vertical',\n 'Horizontal',\n 'guiKitVersion',\n 'allWidgets',\n 'findWidgetType'\n]\n\n# store the widget dict\n_ALL_WIDGETS = {}\nlogger = logging.getLogger('HFX')\n\n\ndef guiKitVersion():\n \"\"\"\n Get the version of PySide that the gui kit is using.\n :return:\n \"\"\"\n logger.debug(' -- PySide -- ')\n logger.debug('\\tVersion: ' + __version__)\n logger.debug('\\tCompiled with: Qt ' + QtCore.__version__)\n logger.debug('\\tUsing: Qt ' + QtCore.qVersion())\n\n\n# layout presets\ndef Vertical(*args):\n layout = QtGui.QVBoxLayout(*args)\n layout.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n return layout\n\ndef Horizontal(*args):\n layout = QtGui.QHBoxLayout(*args)\n layout.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n return layout\n\ndef allWidgets():\n \"\"\"\n Loop over all open HFX widgets.\n :return:\n \"\"\"\n return _ALL_WIDGETS\n\ndef findWidgetType(widgetPath):\n \"\"\"\n Returns a list of widgets that math the application type\n :param widgetPath:\n :return:\n \"\"\"\n widgets = []\n\n for widget in _ALL_WIDGETS[widgetPath]:\n try:\n widgets.append(widget)\n except:\n continue\n\n return widgets\n\nclass LayoutWrapper(QtGui.QWidget):\n \"\"\"\n Layout wrapper widget for HFX.\n \"\"\"\n\n\nclass ConvertToHFX(object):\n \"\"\"\n HFX object that handles most layout manipulations and tedious override functions from PySide.\n \"\"\"\n\n Divider = 0\n\n @staticmethod\n def getCompleteUI(widget):\n \"\"\"\n Determines if the widget should be added as a layout or as a single widget.\n :param widget:\n :return:\n \"\"\"\n if isinstance(widget.parent(), LayoutWrapper):\n return widget.parent()\n elif widget.layout() is not None:\n return widget.layout()\n else:\n return widget\n\n def __init__(self, label=None, layout=None, searchable=False, private=False):\n instance.launchPrep(self, '')\n super(ConvertToHFX, self).__init__()\n\n global _ALL_WIDGETS\n\n if not layout:\n layout = Vertical\n else:\n self.setLayout(layout())\n\n self._defaultLayout = layout\n self._hfxLayout = None\n self._wrapper = None\n self._functions = {}\n self._functionOrder = []\n self._name = ''\n self._searchBar = None\n self._label = None\n self._showSearch = False\n\n # applyHFXStyle(self)\n\n self.setMouseTracking(True)\n\n # add the label if one is provided.\n if label:\n self._label = QtGui.QLabel(self)\n self._label.setText(label)\n self._label.hide()\n\n if searchable:\n 
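 # A searchable widget gets a hidden QLineEdit overlay: the Alt key
 # toggles it in keyPressEvent below, and edits are forwarded to
 # self.search(), which subclasses are expected to implement.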
self._searchBar = QtGui.QLineEdit(self)\n self._searchBar.setPlaceholderText('Search...')\n self._searchBar.setMaximumWidth(300)\n self._searchBar.textChanged.connect(self.search)\n self._searchBar.hide()\n\n if not private:\n self._private = private\n\n if not self.fullClassName() in _ALL_WIDGETS:\n _ALL_WIDGETS[self.fullClassName()] = []\n\n _ALL_WIDGETS[self.fullClassName()].append(self)\n\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Alt:\n if not self._showSearch:\n if self._label:\n self._label.show()\n if self._searchBar:\n self._searchBar.show()\n self._searchBar.setFocus()\n\n self._showSearch = True\n else:\n if self._label:\n self._label.hide()\n if self._searchBar:\n self._searchBar.hide()\n self._searchBar.clearFocus()\n\n self._showSearch = False\n\n # find the correct PySide parent class for painting\n if len(type(self).__bases__) == 1:\n type(self).__bases__[0].__bases__[1].keyPressEvent(self, event)\n return\n else:\n for base in type(self).__bases__:\n if issubclass(base, ConvertToHFX):\n continue\n\n if issubclass(base, QtGui.QWidget):\n base.keyPressEvent(self, event)\n return\n\n def mousePressEvent(self, event):\n if self._showSearch:\n if self._searchBar:\n if self._searchBar.text() == '':\n self._searchBar.hide()\n self._searchBar.clearFocus()\n\n if self._label:\n self._label.hide()\n\n self._showSearch = False\n\n if len(type(self).__bases__) == 1:\n type(self).__bases__[0].__bases__[1].mousePressEvent(self, event)\n return\n else:\n for base in type(self).__bases__:\n if issubclass(base, ConvertToHFX):\n continue\n\n if issubclass(base, QtGui.QWidget):\n base.mousePressEvent(self, event)\n return\n\n def leaveEvent(self, event):\n if self._label:\n self._label.hide()\n if self._searchBar:\n self._searchBar.hide()\n\n # find the correct PySide parent class for painting\n if len(type(self).__bases__) == 1:\n type(self).__bases__[0].__bases__[1].leaveEvent(self, event)\n return\n else:\n for base in type(self).__bases__:\n if issubclass(base, ConvertToHFX):\n continue\n\n if issubclass(base, QtGui.QWidget):\n base.leaveEvent(self, event)\n return\n\n def enterEvent(self, event):\n if self._showSearch:\n if self._label:\n self._label.show()\n if self._searchBar:\n self._searchBar.show()\n self._searchBar.setFocus()\n\n # find the correct PySide parent class for painting\n if len(type(self).__bases__) == 1:\n type(self).__bases__[0].__bases__[1].enterEvent(self, event)\n return\n else:\n for base in type(self).__bases__:\n if issubclass(base, ConvertToHFX):\n continue\n\n if issubclass(base, QtGui.QWidget):\n base.enterEvent(self, event)\n return\n\n def paintEvent(self, event):\n margins = [8, 8]\n\n sizeX = self.size().width()/2\n sizeY = self.size().height()/2\n\n if self._label:\n sizeY = (sizeY + (self._label.size().height()/2)) + margins[1]\n self._label.move((sizeX - margins[0]) - (self._label.size().width()/2), sizeY)\n\n if self._searchBar:\n self._searchBar.move(\n (sizeX - margins[0]) - (self._searchBar.size().width()/2),\n (sizeY + (self._searchBar.size().height()/2) + margins[1])\n )\n\n # find the correct PySide parent class for painting\n if len(type(self).__bases__) == 1:\n type(self).__bases__[0].__bases__[1].paintEvent(self, event)\n return\n else:\n for base in type(self).__bases__:\n if issubclass(base, ConvertToHFX):\n continue\n\n if issubclass(base, QtGui.QWidget):\n base.paintEvent(self, event)\n return\n\n def isHFX(self):\n return True\n\n def setKeywords(self, keywordList):\n \"\"\"\n Add a list of keywords to the 
search bar\n :param keywordList:\n :return:\n \"\"\"\n if self._searchBar:\n self._searchBar.setCompleter(QtGui.QCompleter(keywordList))\n\n def searchWord(self):\n \"\"\"\n Get the current search bar value\n :return:\n \"\"\"\n if self._searchBar is None:\n return ''\n return self._searchBar.text()\n\n def search(self, word=None):\n \"\"\"\n Interface\n Override for different widget types: List, Tree, etc.\n :return:\n \"\"\"\n raise NotImplementedError\n\n def loudMouthHandler(self, lmData):\n \"\"\"\n Interface\n Override for different widget types: List, Tree, etc.\n :return:\n \"\"\"\n raise NotImplementedError\n\n def loudMouthSender(self, lmData, sendTo):\n \"\"\"\n Interface\n Override for different widget types: List, Tree, etc.\n :return:\n \"\"\"\n # import HFX\n import HFX\n\n data = json.dumps(lmData)\n\n lm = HFX.Loudmouth()\n lm.send_message(['%%hfxw%%', self.fullClassName(), data], sendTo.lower())\n\n def name(self):\n \"\"\"\n Get the easy name for this widget.\n :return:\n \"\"\"\n if self._name == '':\n return self.__class__.__name__\n else:\n return self._name\n\n def functions(self):\n \"\"\"\n Grab the dict that stores the widgets functions\n :return: list of the functions order, dict pointing to the functions\n \"\"\"\n funcOrder = []\n funcDict = {}\n for function in self._functionOrder:\n if function == '-$-':\n continue\n funcOrder.append(function)\n funcDict[function] = self._functions[function]\n\n return funcOrder, funcDict\n\n def layoutWrapper(self):\n return self._wrapper\n\n def setMaxWidth(self, width):\n if self._wrapper is None:\n self.setMaximumWidth(width)\n else:\n self._wrapper.setMaximumWidth(width)\n\n def setMaxHeight(self, height):\n if self._wrapper is None:\n self.setMaximumHeight(height)\n else:\n self._wrapper.setMaximumHeight(height)\n\n def addFunction(self, path, function):\n \"\"\"\n Add a right click function to this widget.\n :param path:\n :param function:\n :return:\n \"\"\"\n self._functionOrder.append(path)\n self._functions[path] = function\n\n def addSeparator(self, path=None):\n \"\"\"\n Add a separator to the provided path in the context menu.\n :param path:\n :return:\n \"\"\"\n if path is None:\n self._functionOrder.append('-$-')\n return\n\n if '/' in path:\n # add a slash at the end of the path if it doesnt exist.\n if not path.endswith('/'):\n path += '/'\n self._functionOrder.append(path + '-$-')\n else:\n self._functionOrder.append('-$-')\n\n def contextMenuEvent(self, event, autoExecute=True):\n \"\"\"\n --private--\n :param event:\n :return:\n \"\"\"\n # menu obj\n contextMenu = QtGui.QMenu(self)\n\n # menu mapping\n menuMap = {}\n\n # loop over registered functions\n for functionPath in self._functionOrder:\n # check if there is a '/' in the function path\n if '/' in functionPath:\n # get the actions name based on the ending name in the path\n actionName = basename(functionPath)\n fullPath = dirname(functionPath)\n menu = None\n pathSplit = functionPath.split('/')\n for pathPart in pathSplit:\n # exclude leading slash if there is one.\n if pathPart == '':\n continue\n\n # pull the current menu path\n menuPath = '/'.join(pathSplit[:pathSplit.index(pathPart) + 1]).rstrip('/')\n\n if pathPart == actionName and dirname(menuPath) == fullPath:\n # add orphan actions. This is happens if the programmer registers a function like\n # obj.addFunction('/test', testFunc). This will cause the leading slash to register as an empty\n # string and will have no parent menu. 
Basically means this is a top level action.\n if menu is None:\n # internally, HFX assigns '-$-' as a flag for separators. If this is the path parts value,\n # it will add a separator to the menu in question.\n if pathPart == '-$-':\n contextMenu.addSeparator()\n else:\n action = contextMenu.addAction(pathPart)\n action.triggered.connect(self._functions[functionPath])\n # if there is a menu assigned, add the new action to this menu instead.\n else:\n if pathPart == '-$-':\n menu.addSeparator()\n else:\n action = menu.addAction(pathPart)\n action.triggered.connect(self._functions[functionPath])\n # go through and find the menu that this path is nested in.\n else:\n # create the sub menu if it doesn't exist in the menu map.\n if menuPath not in menuMap:\n if menu is None:\n menu = contextMenu.addMenu(pathPart)\n else:\n menu = menu.addMenu(pathPart)\n menuMap[menuPath] = menu\n else:\n menu = menuMap[menuPath]\n else:\n if functionPath == '-$-':\n contextMenu.addSeparator()\n else:\n # create the action and connect it to the registered function\n action = contextMenu.addAction(functionPath)\n action.triggered.connect(self._functions[functionPath])\n\n contextMenu.addSeparator()\n # check if the studio has set up the HFX docs location.\n if 'HFX_DOCS' in os.environ:\n helpMenu = contextMenu.addMenu('Help')\n docs = helpMenu.addAction('Documentation')\n docs.triggered.connect(self.viewDocs)\n\n if not self._private:\n # class source editor\n contextMenu.addSeparator()\n hfxMenu = contextMenu.addMenu('HFX Dev Tools')\n source = hfxMenu.addAction('Open Source')\n source.triggered.connect(self.showSource)\n sourceDirectory = hfxMenu.addAction('Open Source Directory')\n sourceDirectory.triggered.connect(self.openInFileSystem)\n\n self.addSeparator()\n\n if autoExecute:\n # execute the menu\n contextMenu.exec_(event.globalPos())\n else:\n return contextMenu, event\n\n def documentationPath(self):\n \"\"\"\n Get the path to the classes documentation.\n :return:\n \"\"\"\n return join(os.environ['HFX_DOCS'], self.fullClassName() + '.hfxdoc')\n\n def fullClassName(self):\n \"\"\"\n Returns the full path to the class.\n :return:\n \"\"\"\n return type(instance.OPEN_APPLICATION).__name__ + '.' + type(self).__module__ + '.' + type(self).__name__\n\n def viewDocs(self):\n \"\"\"\n :return:\n \"\"\"\n import HFX\n editor = HFX.DocEditor(self)\n editor.show()\n\n def alignment(self):\n \"\"\"\n Returns the alignment you want to have for your widget. By default you get it aligned to top left.\n :return:\n \"\"\"\n return QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft\n\n def connectTo(self, function, *args):\n \"\"\"\n Pass a function with a list of signals you want to tie it to or pass a signal you want to tie a list of\n functions to.\n :param function:\n :param args:\n :return:\n \"\"\"\n if isinstance(function, QtCore.Signal):\n for func in args:\n function.connect(func)\n else:\n for signal in args:\n signal.connect(function)\n\n def addWidget(self, widget, index=None):\n \"\"\"\n Add a widget to this widgets layout. 
If this widget has no layout, it will find the parent layout that this widget\n belongs to add to that.\n :param index: Index of where you would like to put the widget.\n :param widget: Widget object.\n :return:\n \"\"\"\n # if this widget does not have an already existing layout.\n if not self.layout():\n # find the parent widgets layout.\n # search variables.\n parentLayout = None\n parent = self\n\n # begin search.\n while 1:\n # extract layout.\n parentLayout = parent.layout()\n if parentLayout is not None:\n # break out of the while loop if it found the parent layout.\n break\n\n # grab the next parent.\n parent = parent.parent()\n if parent is None:\n # break out of the while loop if there is no parent yet for this widget.\n break\n\n # check if it found a layout. If it did not, create and assign the default layout.\n if parentLayout is None and self._defaultLayout is not None:\n # construct a wrapper widget for the layout.\n self._wrapper = LayoutWrapper()\n # init the layout\n parentLayout = self._defaultLayout(self._wrapper)\n # set your layouts alignment\n parentLayout.setAlignment(self.alignment())\n # add this widget to the containing layout\n parentLayout.addWidget(self)\n\n # raise an HFX layout error if there is no layout found.\n if not parentLayout:\n raise HFXLayoutError('There was no parent layout found and no default layout to assign.')\n else:\n # grab the existing layout.\n parentLayout = self.layout()\n\n if widget == self.Divider:\n import HFX\n if isinstance(parentLayout, QtGui.QHBoxLayout):\n parentLayout.addWidget(HFX.VerticalDivider())\n elif isinstance(parentLayout, QtGui.QVBoxLayout):\n parentLayout.addWidget(HFX.HorizontalDivider())\n return\n else:\n ui = self.getCompleteUI(widget)\n if isinstance(ui, QtGui.QLayout):\n ui = ui.parentWidget()\n if index is not None:\n parentLayout.insertWidget(index, ui)\n else:\n parentLayout.addWidget(ui)\n\n def addLayoutDivider(self):\n self.addWidget(self.Divider)\n\n def showFunctionSource(self, func):\n \"\"\"\n Opens a functions source code\n :param func:\n :return:\n \"\"\"\n module = inspect.getmodule(func)\n if not module:\n self.showSource()\n return\n startfile(module.__file__)\n\n def showSource(self):\n \"\"\"\n Opens the HFX widgets source code in the respective editor\n :return:\n \"\"\"\n startfile(sys.modules[self.__module__].__file__)\n\n def openInFileSystem(self):\n \"\"\"\n Opens the directory that the source code lives in.\n :return:\n \"\"\"\n startfile(dirname(sys.modules[self.__module__].__file__))\n\n def show(self):\n \"\"\"\n Custom show that executes the application environment if it doesnt exist.\n :return:\n \"\"\"\n if self._wrapper:\n instance.waitTillClose(self._wrapper)\n else:\n instance.waitTillClose(self)\n\n\nclass HFXLayoutError(Exception):\n \"\"\"\n Error for when layouts are unable to be set.\n \"\"\"\n\n\ndef applyHFXStyle(widget):\n widget.setStyle(QtGui.QStyleFactory().create('Plastique'))\n widget.setStyleSheet(instance.hfxStylesheet())\n\n","sub_path":"HFX/hfx_gui/utilities/PySide_to_HFX.py","file_name":"PySide_to_HFX.py","file_ext":"py","file_size_in_byte":20329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"236446427","text":"# Find the contiguous subarray within an array (containing at least one number) which has the largest sum.\n#\n# For example, given the array [−2,1,−3,4,−1,2,1,−5,4],\n# the contiguous subarray [4,−1,2,1] has the largest sum = 6.\n\nimport time\n\n\nclass Solution(object):\n def maxSubArray(self, 
nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n max_ = nums[0]\n for i in range(len(nums)):\n max_sub = nums[i]\n tmp = nums[i]\n for j in range(i+1, len(nums)):\n tmp += nums[j]\n max_sub = max(tmp, max_sub)\n max_ = max(max_sub, max_)\n return max_\n\n def maxSub(self, nums):\n max_list = [0 for _ in range(len(nums))]\n max_list[0] = nums[0]\n for i in range(1, len(nums)):\n max_list[i] = max(max_list[i-1] + nums[i], nums[i])\n return max(max_list)\n\n def maxSub2(self, nums):\n tmp = nums[0]\n max_ = nums[0]\n for i in range(1, len(nums)):\n tmp = max(tmp + nums[i], nums[i])\n max_ = max(tmp, max_)\n return max_\n\n\nif __name__ == \"__main__\":\n obj = Solution()\n\n start_time = time.time()\n print(obj.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n start_time = time.time()\n print(obj.maxSub([-2, 1, -3, 4, -1, 2, 1, -5, 4]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n start_time = time.time()\n print(obj.maxSub2([-2, 1, -3, 4, -1, 2, 1, -5, 4]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"Python/053_Maximum_Subarray/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"648999142","text":"\"\"\"\nWrite a documentation for the simple function below. Your partner will have to\nimplement the function, without knowing the code. Send your partner the\ndocumentation and see if he can work with it.\nNo cheating! Don't show or tell hem the code directly\n\"\"\"\ndef function_2c(w, x, y, z):\n \"\"\"\n Calculate basic arithmetic operations (multiplication, division, addition\n and subtraction). Calculates x*y, x/y, w+z and w-z and returns them as\n a dictionary.\n\n Arguments:\n w, x, y, z -- Numbers to use for calculations\n\n Returns:\n A dictionary with keys 'multiply', 'divide', 'add' and 'subtract' which\n map to the corresponding values.\n \"\"\"\n\n\n multiplication = x * y\n division = x / y\n addition = w + z\n subtraction = w - z\n\n results = {\"multiply\": multiplication,\n \"divide\": division,\n \"add\": addition,\n \"subtract\": subtraction}\n\n return results\n","sub_path":"assignment_2c.py","file_name":"assignment_2c.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"581961163","text":"# -*- coding: utf-8 -*-\nimport random\nfrom respostas import get_respostas\nfrom conversas import salva\n\nclass Intencao:\n\n\n def __init__(self, nome, io, msg_analizada, cm, cliente=None, modo=\"1\"):\n self.nome = nome\n self.io = io\n self.msg_analizada = msg_analizada\n self.cm = cm\n\n def executa(self):\n respostas = get_respostas(self.nome)\n resposta = random.choice(respostas)\n self.io.imprime(resposta)\n salva(self.msg_analizada)","sub_path":"intencoes/intencao.py","file_name":"intencao.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"561407415","text":"import logging\nfrom PIL import Image\n\nfrom pixelsort.util import crop_to\nfrom pixelsort.sorter import sort_image\nfrom pixelsort.constants import DEFAULTS\nfrom pixelsort.interval import choices as interval_choices\nfrom pixelsort.sorting import choices as sorting_choices\n\n\ndef pixelsort(\n image, mask_image=None, interval_image=None, randomness=DEFAULTS[\"randomness\"],\n 
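 # (every keyword falls back to the module-level DEFAULTS mapping, so
 # pixelsort(img) alone is a valid call; angle pre-rotates the image and
 # the rotation is undone before the result is returned)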
\ndef pixelsort(\n image, mask_image=None, interval_image=None, randomness=DEFAULTS[\"randomness\"],\n clength=DEFAULTS[\"clength\"], sorting_function=DEFAULTS[\"sorting_function\"],\n interval_function=DEFAULTS[\"interval_function\"],\n lower_threshold=DEFAULTS[\"lower_threshold\"], upper_threshold=DEFAULTS[\"upper_threshold\"],\n angle=DEFAULTS[\"angle\"]\n):\n\n original = image\n image = image.convert('RGBA').rotate(angle, expand=True)\n image_data = image.load()\n\n mask_image = mask_image if mask_image else Image.new(\"1\", original.size, color=255)\n\n mask_data = (mask_image.convert('1').rotate(angle, expand=True, fillcolor=0).load())\n\n interval_image = (interval_image.convert('1').rotate(angle, expand=True)) if interval_image else None\n\n logging.debug(\"Determining intervals...\")\n\n intervals = interval_choices[interval_function](\n image,\n lower_threshold=lower_threshold,\n upper_threshold=upper_threshold,\n clength=clength,\n interval_image=interval_image,\n )\n\n logging.debug(\"Sorting pixels...\")\n\n sorted_pixels = sort_image(\n image.size,\n image_data,\n mask_data,\n intervals,\n randomness,\n sorting_choices[sorting_function])\n\n output_img = _place_pixels(\n sorted_pixels,\n mask_data,\n image_data,\n image.size)\n\n if angle != 0:\n output_img = output_img.rotate(-angle, expand=True)\n output_img = crop_to(output_img, original)\n\n return output_img\n\n\ndef _place_pixels(pixels, mask, original, size):\n output_img = Image.new('RGBA', size)\n for y in range(size[1]):\n count = 0\n for x in range(size[0]):\n if not mask[x, y]:\n output_img.putpixel((x, y), original[x, y])\n else:\n output_img.putpixel((x, y), pixels[y][count])\n count += 1\n return output_img\n","sub_path":"pixelsort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"623524635","text":"# coding: utf-8\n\nfrom flask import render_template\nfrom flask import Blueprint\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom scipy.stats import norm\nimport math\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom cStringIO import StringIO\nimport base64\nfrom flask import request\n\nViewCalculate = Blueprint('ViewCalculate', __name__)\n\ndef CalcNormalDist(no_of_times, mean, sd, initValue):\n y = [initValue]\n for i in range(no_of_times):\n r = 1+np.random.normal(mean, sd, 1)*0.01\n v = y[i]*r[0]  # y[i] is the most recent value at step i\n y.append(v)\n return y[-1]\n\n@ViewCalculate.route('/calculate',methods=['POST','GET'])\ndef calculate():\n holding_days = int(request.form['holdingDays'])\n current_rate = float(request.form['currentRate'])\n
 # Import the Excel file.. will later be replaced by an API\n xls_file = pd.ExcelFile('./rates.xls')\n df = xls_file.parse('Macrobond data')\n df.set_index(['date'],inplace=True)\n df = df[df.columns[0:1]]\n #df\n # Calculate the percentage change between days\n df_change = df.pct_change()*100\n index = ['Payments', 'Sum (SEK)', 'Average holding period', 'Low', 'High', 'Mean', 'n', 'Standard dev', 'Mean perc change', 'SD of perc change']\n columns = ['USD']\n df_input = pd.DataFrame(index=index, columns=columns)\n df_input.ix['Payments'] = 780\n df_input.ix['Sum (SEK)'] = 7188413340.65\n df_input.ix['Average holding period'] = holding_days\n df_input.ix['Low'] = df[df.columns[0:]].min().values\n df_input.ix['High'] = df[df.columns[0:]].max().values\n df_input.ix['Mean'] = df[df.columns[0:]].mean().values\n df_input.ix['n'] = df.count().values\n df_input.ix['Standard dev'] = df[df.columns[0:]].std().values\n df_input.ix['Mean_change'] = df_change[df_change.columns[0:]].mean().values\n df_input.ix['SD_change'] = df_change[df_change.columns[0:]].std().values\n inputValues = df_input.to_html()\n \n # Run the simulation over the \"Average holding period\"\n mean = df_input.ix['Mean_change'].values[0]\n sd = df_input.ix['SD_change'].values[0]\n \n todays_date = datetime.datetime.now().date()\n index = pd.date_range(todays_date+datetime.timedelta(1), periods=holding_days, freq='D')\n\n NormalDist = 1+np.random.normal(mean, sd, holding_days)*0.01\n sim1_temp = []\n sim1_temp.append(current_rate)\n\n for i in range(1, holding_days):\n prior_value = sim1_temp[i-1]\n next_value = prior_value*NormalDist[i-1]\n sim1_temp.append(next_value)\n df_sim1_results = pd.DataFrame(columns=[\"NormalDist\",\"iterations\"], index=index)\n df_sim1_results[\"NormalDist\"] = NormalDist\n df_sim1_results[\"iterations\"] = sim1_temp\n\n # after one simulation of the 'Average holding period', take the last value\n # Simulate the value development 10k times
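 # Each run draws daily percentage changes from Normal(mean, sd) via\n # CalcNormalDist and keeps only the final simulated rate.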
\n\n \n sim2_temp = []\n for i in range(0, 10000):\n val = CalcNormalDist(holding_days,mean,sd,current_rate)\n sim2_temp.append(val)\n df_sim2_results = pd.DataFrame(columns=[\"iterations\"])\n df_sim2_results[\"iterations\"] = sim2_temp\n\n result_median = df_sim2_results['iterations'].median()\n result_sd = df_sim2_results['iterations'].std()\n result_avg = df_sim2_results['iterations'].mean()\n\n bins = 9\n half_bins = math.floor(bins/2)\n \n result_bins = []\n result_bins.append(result_avg-half_bins*result_sd)\n for i in range(1, bins):\n prior_value = result_bins[i-1]\n next_value = prior_value+result_sd\n result_bins.append(next_value)\n #result_bins = np.asarray(result_bins)\n #result_bins = pd.Series(result_bins).to_frame(name=\"bins\")\n #return \"result_bins\"\n\n tenLow = df_sim2_results['iterations'].nsmallest(1000).tail(1).values[0]\n tenHigh = df_sim2_results['iterations'].nlargest(1000).tail(1).values[0]\n\n fiveLow = df_sim2_results['iterations'].nsmallest(500).tail(1).values[0]\n fiveHigh = df_sim2_results['iterations'].nlargest(500).tail(1).values[0]\n\n count,division = np.histogram(df_sim2_results['iterations'],bins=result_bins)\n\n dfPlot = pd.Series(count, index=division[:-1]).to_frame()\n dfPlot.plot()\n bins = dfPlot.to_html()\n dfPlot.plot()\n io = StringIO()\n plt.savefig(io, format='png')\n data = base64.encodestring(io.getvalue())\n #return data\n return render_template('calculate.html', imgData=data, median=result_median, mean=result_avg, sd=result_sd, tenLow=tenLow, tenHigh=tenHigh, fiveLow=fiveLow, fiveHigh=fiveHigh,inputValues=inputValues,bins=bins)\n","sub_path":"routes/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"82064197","text":"# https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\n\n\nclass Solution:\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n ans = []\n level_list = deque()\n if root is None:\n return []\n # start with the level 0 with a delimiter\n node_queue = deque([root, None])\n is_order_left = True\n\n while len(node_queue) > 0:\n curr_node = node_queue.popleft()\n\n if curr_node:\n if is_order_left:\n level_list.append(curr_node.val)\n else:\n level_list.appendleft(curr_node.val)\n\n if curr_node.left:\n node_queue.append(curr_node.left)\n if curr_node.right:\n node_queue.append(curr_node.right)\n else:\n # we finish one level\n ans.append(level_list)\n # add a delimiter to mark the level\n if len(node_queue) > 0:\n node_queue.append(None)\n\n # prepare for the next level\n level_list = deque()\n is_order_left = not is_order_left\n\n return ans\n ","sub_path":"103_binary-tree-zigzag-level-order-traversal.py","file_name":"103_binary-tree-zigzag-level-order-traversal.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"375149171","text":"#encoding=utf8\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom .models import *\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom forms import UserAskForm\nfrom operation.models import UserFavorite\nfrom courses.models import Course\n#from utils.mixin_utils import LoginRequiredMixin
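# Views for the course-organization app: a filterable, paginated org list,\n# org detail pages (home/courses/description/teachers), plus endpoints for\n# user consultations (AddUserAskView) and favorites (AddFavView).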
\n# Create your views here.\n\nclass OrgView(View):\n '''Course organizations'''\n def get(self,request):\n all_org = CourseOrg.objects.all()\n all_citys = CityDict.objects.all()\n org_nums = all_org.count()\n all_students = 0\n all_courses = 0\n\n city_id = request.GET.get(\"city\",\"\")\n\n if city_id:\n all_org = all_org.filter(city_id=int(city_id))\n\n category= request.GET.get(\"ct\",\"\")\n if category:\n all_org = all_org.filter(category=category)\n\n for org in all_org:\n all_students =all_students + org.students\n all_courses = all_courses +org.course_nums\n\n # pagination\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n p = Paginator(all_org, 2, request=request)\n\n orgs = p.page(page)\n\n\n\n context = {\"all_orgs\": orgs, \"all_citys\": all_citys,\n \"org_nums\": org_nums, \"all_students\": all_students,\n \"all_courses\": all_courses,\"category\":category,\n \"city_id\":city_id}\n\n return render(request,\"org_list.html\",context)\n\n\nclass AddUserAskView(View):\n '''User-submitted consultation'''\n\n def post(self, request):\n userask_form = UserAskForm(request.POST)\n if userask_form.is_valid():\n user_ask = userask_form.save(commit=True)\n return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n else:\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"添加出错\"}', content_type='application/json')\n\n\nclass OrgHomeView(View):\n def get(self,request,org_id):\n course_org = CourseOrg.objects.get(id=int(org_id))\n course_org.click_nums +=1\n course_org.save()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n context = {\n 'all_courses':all_courses,\n 'all_teachers':all_teachers,\n 'org':course_org,\n }\n return render(request, 'org-detail-homepage.html',context)\n\n\nclass OrgCourseView(View):\n def get(self,request,org_id):\n course_org = CourseOrg.objects.get(id=int(org_id))\n course_org.click_nums += 1\n course_org.save()\n all_courses = course_org.course_set.all()\n\n context = {\n 'all_courses': all_courses,\n 'org': course_org,\n }\n return render(request, 'org-detail-course.html', context)\n\n\n\nclass OrgDescView(View):\n def get(self,request,org_id):\n course_org = CourseOrg.objects.get(id=int(org_id))\n course_org.click_nums += 1\n course_org.save()\n\n context = {\n\n 'org': course_org,\n }\n return render(request, 'org-detail-desc.html', context)\n\n\nclass OrgTeacherView(View):\n def get(self,request,org_id):\n course_org = CourseOrg.objects.get(id=int(org_id))\n course_org.click_nums += 1\n course_org.save()\n all_teachers = course_org.teacher_set.all()[:1]\n context = {\n 'all_teachers': all_teachers,\n 'org': course_org,\n }\n return render(request, 'org-detail-teachers.html', context)\n\n\nclass AddFavView(View):\n '''User favorites'''\n def post(self,request):\n fav_id = request.POST.get('fav_id',0)\n fav_type = request.POST.get('fav_type',0)\n user = request.user\n if not request.user.is_authenticated():\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"用户未登录\"}', content_type='application/json')\n exist_records = UserFavorite.objects.filter(user = request.user,fav_id = int(fav_id),fav_type=int(fav_type))\n\n if exist_records:\n exist_records.delete()\n return HttpResponse('{\"status\":\"success\", \"msg\":\"已取消收藏\"}', content_type='application/json')\n else:\n user_fav = UserFavorite()\n if int(fav_id) > 0 and int(fav_type) > 0:\n user_fav.user = request.user\n user_fav.fav_id = int(fav_id)\n user_fav.fav_type = int(fav_type)\n user_fav.save()\n return 
HttpResponse('{\"status\":\"success\", \"msg\":\"已收藏\"}', content_type='application/json')\n else:\n return HttpResponse('{\"status\":\"fail\",\"msg\":\"收藏出错\"}',content_type='application/json')\n\n","sub_path":"lyc_test/apps/organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"232789259","text":"\"\"\"\nTool Name: General G (Getis-Ord) Statistic\nSource Name: GeneralG.py\nVersion: ArcGIS 10.1\nAuthor: Environmental Systems Research Institute Inc.\nDescription: Computes General G statistic\n\"\"\"\n\n################### Imports ########################\nimport sys as SYS\nimport os as OS\nimport numpy as NUM\nimport xml.etree.ElementTree as ET\nimport arcgisscripting as ARC\nimport arcpy as ARCPY\nimport ErrorUtils as ERROR\nimport SSUtilities as UTILS\nimport SSDataObject as SSDO\nimport Stats as STATS\nimport WeightsUtilities as WU\nimport gapy as GAPY\nimport SSReport as REPORT\nimport locale as LOCALE\nLOCALE.setlocale(LOCALE.LC_ALL, '')\n\n################### GUI Interface ###################\n\ndef setupGeneralG():\n \"\"\"Retrieves the parameters from the User Interface and executes the\n appropriate commands.\"\"\"\n\n inputFC = ARCPY.GetParameterAsText(0) \n varName = ARCPY.GetParameterAsText(1).upper() \n displayIt = ARCPY.GetParameter(2) \n\n #### Parse Space Concept ####\n spaceConcept = ARCPY.GetParameterAsText(3).upper().replace(\" \", \"_\")\n if spaceConcept == \"INVERSE_DISTANCE_SQUARED\":\n exponent = 2.0\n else:\n exponent = 1.0\n try:\n spaceConcept = WU.convertConcept[spaceConcept] \n wType = WU.weightDispatch[spaceConcept]\n except:\n ARCPY.AddIDMessage(\"Error\", 723)\n raise SystemExit()\n\n #### EUCLIDEAN or MANHATTAN ####\n distanceConcept = ARCPY.GetParameterAsText(4).upper().replace(\" \", \"_\")\n concept = WU.conceptDispatch[distanceConcept]\n\n #### Row Standardized ####\n rowStandard = ARCPY.GetParameterAsText(5).upper()\n if rowStandard == 'ROW':\n rowStandard = True\n else:\n rowStandard = False\n\n #### Distance Threshold ####\n threshold = UTILS.getNumericParameter(6)\n\n #### Spatial Weights File ####\n weightsFile = UTILS.getTextParameter(7) \n if weightsFile == None and wType == 8:\n ARCPY.AddIDMessage(\"ERROR\", 930)\n raise SystemExit()\n if weightsFile and wType != 8:\n ARCPY.AddIDMessage(\"WARNING\", 925)\n weightsFile = None\n\n #### Create a Spatial Stats Data Object (SSDO) ####\n ssdo = SSDO.SSDataObject(inputFC, useChordal = True)\n\n #### Set Unique ID Field ####\n masterField = UTILS.setUniqueIDField(ssdo, weightsFile = weightsFile)\n\n #### Populate SSDO with Data ####\n if WU.gaTypes[spaceConcept]:\n ssdo.obtainDataGA(masterField, [varName], minNumObs = 3, \n warnNumObs = 30)\n else:\n ssdo.obtainData(masterField, [varName], minNumObs = 3, \n warnNumObs = 30)\n\n #### Run High-Low Clustering ####\n gg = GeneralG(ssdo, varName, wType, weightsFile = weightsFile, \n concept = concept, rowStandard = rowStandard, \n threshold = threshold, exponent = exponent)\n\n #### Report and Set Parameters ####\n ggString, zgString, pvString = gg.report()\n try:\n ARCPY.SetParameterAsText(8, ggString)\n ARCPY.SetParameterAsText(9, zgString)\n ARCPY.SetParameterAsText(10, pvString)\n except:\n ARCPY.AddIDMessage(\"WARNING\", 902)\n\n #### Create HTML Output ####\n if displayIt:\n htmlOutFile = gg.reportHTML(htmlFile = None)\n ARCPY.SetParameterAsText(11, htmlOutFile)\n\n#################### Classes 
########################\n\nclass GeneralG(object):\n \"\"\"Calculates the General G Statistic:\n\n INPUTS: \n ssdo (obj): instance of SSDataObject\n varName (str): name of analysis field\n wType (int): spatial conceptualization (1)\n weightsFile {str, None}: path to a spatial weights matrix file\n concept: {str, EUCLIDEAN}: EUCLIDEAN or MANHATTAN \n rowStandard {bool, True}: row standardize weights?\n threshold {float, None}: distance threshold\n exponent {float, 1.0}: distance decay\n displayIt {bool, False}: create graphical html output?\n\n ATTRIBUTES:\n numObs (int): number of features in analysis\n y (array, numObs x 1): vector of field values\n gg (float): General G value \n ei (float): Expected value of General G\n vg (float): Var of General G (randomization)\n zg (float): z-score for General G \n pVal (float): p-value (two-tailed test)\n standDev (float): sqrt(vi)\n s0,s1,s2 (float): Spatial Weights Characteristics\n b0,b1,b2,b3,b4 (float): Spatial Weights Characteristics\n\n NOTES:\n (1) See the wTypeDispatch dictionary in WeightsUtilities.py for a \n complete list of spatial conceptualizations and their corresponding\n integer values.\n \"\"\"\n\n def __init__(self, ssdo, varName, wType, weightsFile = None, \n concept = \"EUCLIDEAN\", rowStandard = True, threshold = None,\n exponent = 1.0, displayIt = False):\n\n #### Set Initial Attributes ####\n UTILS.assignClassAttr(self, locals())\n\n #### Assess Whether SWM File Being Used ####\n self.swmFileBool = False \n if weightsFile:\n weightSuffix = weightsFile.split(\".\")[-1].lower()\n self.swmFileBool = (weightSuffix == \"swm\")\n\n ##### Warn Inverse Distance if Geographic Coord System ####\n #if wType in [0, 7]:\n # WU.checkGeographicCoord(self.ssdo.spatialRefType,\n # WU.wTypeDispatch[wType])\n\n #### Initialize Data ####\n self.initialize()\n\n #### Construct Based on SWM File or On The Fly ####\n self.construct()\n\n #### Calculate General G ####\n self.calculate()\n\n def initialize(self):\n \"\"\"Populates the instance of the Spatial Statistics Data \n Object (SSDataObject) and resolves a default distance threshold\n if none given.\n \"\"\"\n\n #### Shorthand Attributes ####\n ssdo = self.ssdo\n varName = self.varName\n concept = self.concept\n threshold = self.threshold\n exponent = self.exponent\n wType = self.wType\n rowStandard = self.rowStandard\n weightsFile = self.weightsFile\n swmFileBool = self.swmFileBool\n masterField = ssdo.masterField\n\n #### Get Data Array ####\n field = ssdo.fields[varName]\n self.y = field.returnDouble()\n self.numObs = ssdo.numObs\n maxSet = False\n\n #### Distance Threshold ####\n if wType in [0, 1, 7]:\n if threshold == None:\n threshold, avgDist = WU.createThresholdDist(ssdo, \n concept = concept)\n\n #### Assures that the Threshold is Appropriate ####\n gaExtent = UTILS.get92Extent(ssdo.extent)\n threshold, maxSet = WU.checkDistanceThreshold(ssdo, threshold,\n weightType = wType)\n\n #### If the Threshold is Set to the Max ####\n #### Set to Zero for Script Logic ####\n if maxSet:\n #### All Locations are Related ####\n if self.numObs > 500:\n ARCPY.AddIDMessage(\"Warning\", 717)\n self.thresholdStr = ssdo.distanceInfo.printDistance(threshold)\n else:\n self.thresholdStr = \"None\"\n\n #### Set Attributes ####\n self.maxSet = maxSet\n self.threshold = threshold\n self.master2Order = ssdo.master2Order\n self.swmFileBool = swmFileBool\n\n def construct(self):\n \"\"\"Constructs the neighborhood structure for each feature and\n dispatches the appropriate values for the calculation of 
the\n statistic.\"\"\"\n\n #### Shorthand Attributes ####\n ssdo = self.ssdo\n varName = self.varName\n concept = self.concept\n gaConcept = concept.lower()\n threshold = self.threshold\n exponent = self.exponent\n wType = self.wType\n rowStandard = self.rowStandard\n numObs = self.numObs\n master2Order = self.master2Order\n masterField = ssdo.masterField\n weightsFile = self.weightsFile\n\n #### Check That All Input Values are Positive ####\n if NUM.sum(self.y < 0.0) != 0:\n ARCPY.AddIDMessage(\"Error\", 915)\n raise SystemExit()\n\n #### Assure that Variance is Larger than Zero ####\n yVar = NUM.var(self.y)\n if NUM.isnan(yVar) or yVar <= 0.0:\n ARCPY.AddIDMessage(\"Error\", 906)\n raise SystemExit()\n\n #### Create Base Data Structures/Variables #### \n self.numer = 0.0\n self.denom = 0.0\n self.rowSum = NUM.zeros(numObs)\n self.colSum = NUM.zeros(numObs)\n self.ySum = 0.0 \n self.y2Sum = 0.0\n self.y3Sum = 0.0\n self.y4Sum = 0.0\n self.s0 = 0\n self.s1 = 0\n self.wij = {}\n\n #### Set Neighborhood Structure Type ####\n if self.weightsFile:\n if self.swmFileBool:\n #### Open Spatial Weights and Obtain Chars ####\n swm = WU.SWMReader(weightsFile)\n N = swm.numObs\n rowStandard = swm.rowStandard\n\n #### Check to Assure Complete Set of Weights ####\n if numObs > N:\n ARCPY.AddIDMessage(\"Error\", 842, numObs, N)\n raise SystemExit()\n \n #### Check if Selection Set ####\n isSubSet = False\n if numObs < N:\n isSubSet = True\n iterVals = xrange(N)\n else:\n #### Warning for GWT with Bad Records/Selection ####\n if ssdo.selectionSet or ssdo.badRecords:\n ARCPY.AddIDMessage(\"WARNING\", 1029)\n\n #### Build Weights Dictionary ####\n weightDict = WU.buildTextWeightDict(weightsFile, master2Order)\n iterVals = master2Order.iterkeys() \n N = numObs\n\n elif wType in [4, 5]:\n #### Polygon Contiguity ####\n if wType == 4:\n contiguityType = \"ROOK\"\n else:\n contiguityType = \"QUEEN\"\n contDict = WU.polygonNeighborDict(ssdo.inputFC, ssdo.oidName,\n contiguityType = contiguityType)\n iterVals = master2Order.keys()\n N = numObs\n\n else:\n gaTable = ssdo.gaTable\n gaSearch = GAPY.ga_nsearch(gaTable)\n if wType == 7:\n #### Zone of Indiff, All Related to All ####\n gaSearch.init_nearest(threshold, numObs, gaConcept)\n else:\n #### Inverse and Fixed Distances ####\n gaSearch.init_nearest(threshold, 0, gaConcept)\n iterVals = xrange(numObs)\n N = numObs\n neighWeights = ARC._ss.NeighborWeights(gaTable, gaSearch,\n weight_type = wType,\n exponent = exponent,\n row_standard = rowStandard)\n\n #### Create Progressor ####\n ARCPY.SetProgressor(\"step\", ARCPY.GetIDMessage(84007), 0, N, 1)\n\n #### Create Neighbor Info Class ####\n ni = WU.NeighborInfo(masterField)\n\n #### Calculation For Each Feature ####\n for i in iterVals:\n if self.swmFileBool:\n #### Using SWM File ####\n info = swm.swm.readEntry()\n masterID = info[0]\n if master2Order.has_key(masterID):\n rowInfo = WU.getWeightsValuesSWM(info, master2Order,\n self.y, \n rowStandard = rowStandard,\n isSubSet = isSubSet)\n includeIt = True\n else:\n includeIt = False\n\n elif self.weightsFile and not self.swmFileBool:\n #### Text Weights ####\n masterID = i\n includeIt = True\n rowInfo = WU.getWeightsValuesText(masterID, master2Order,\n weightDict, self.y)\n\n elif wType in [4, 5]:\n #### Polygon Contiguity ####\n masterID = i\n includeIt = True\n rowInfo = WU.getWeightsValuesCont(masterID, master2Order,\n contDict, self.y, \n rowStandard = rowStandard)\n\n else:\n #### Distance Based ####\n masterID = gaTable[i][0]\n includeIt = True\n rowInfo = 
WU.getWeightsValuesOTF(neighWeights, i, self.y)\n\n #### Subset Boolean for SWM File ####\n if includeIt:\n #### Parse Row Info ####\n orderID, yiVal, nhIDs, nhVals, weights = rowInfo\n\n #### Assure Neighbors Exist After Selection ####\n nn, nhIDs, nhVals, weights = ni.processInfo(masterID, nhIDs, \n nhVals, weights)\n\n if nn:\n #### Process Feature Contribution to General G ####\n self.processRow(orderID, yiVal, nhIDs, \n nhVals, weights) \n\n #### Reset Progressor ####\n ARCPY.SetProgressorPosition()\n\n #### Clean Up ####\n if self.swmFileBool:\n swm.close()\n \n #### Report on Features with No Neighbors ####\n ni.reportNoNeighbors()\n\n #### Report on Features with Large Number of Neighbors ####\n ni.reportWarnings()\n ni.reportMaximums()\n self.neighInfo = ni\n\n def processRow(self, orderID, yiVal, nhIDs, nhVals, weights):\n \"\"\"Processes a feature's contribution to the General G statistic.\n \n INPUTS:\n orderID (int): order in corresponding numpy value arrays\n yiVal (float): value for given feature\n nhIDs (array, nn): neighbor order in corresponding numpy value arrays\n nhVals (array, nn): values for neighboring features (1)\n weights (array, nn): weight values for neighboring features (1)\n\n NOTES:\n (1) nn is equal to the number of neighboring features\n \"\"\"\n \n #### Process Sums ####\n sumW = weights.sum()\n self.s0 += sumW\n yiVal2 = yiVal**2.0\n self.ySum += yiVal\n self.y2Sum += yiVal2\n self.y3Sum += (yiVal**3.0)\n self.y4Sum += (yiVal**4.0)\n\n self.denom += (NUM.sum(self.y * yiVal)) - yiVal2\n self.numer += NUM.sum(nhVals * weights) * yiVal\n\n #### Weights Characteristics Update ####\n c = 0\n for neighID in nhIDs:\n ij = (orderID, neighID)\n ji = (neighID, orderID)\n w = weights[c] \n self.s1 += w**2.0\n try:\n self.s1 += 2.0 * w * self.wij.pop(ji)\n except:\n self.wij[ij] = w\n self.rowSum[orderID] += w\n self.colSum[neighID] += w\n c += 1\n\n def calculate(self):\n \"\"\"Calculate General G Statistic.\"\"\"\n\n s0 = self.s0\n s1 = self.s1\n n = len(self.rowSum)\n s2 = ((self.rowSum + self.colSum)**2.0).sum()\n self.s2 = s2\n self.gg = (self.numer * 1.0) / self.denom\n self.eg = (self.s0 * 1.0) / (n * (n-1))\n self.squareExpectedG = self.eg**2.0\n self.n = n\n s02 = s0 * s0\n n2 = n * n\n s02 = s0**2\n b0 = (s1 * (n2 - (3*n) + 3)) - (n*s2) + (3*s02)\n b1 = - ( s1*(n2 - n) - (2*n*s2) + (6*s02) )\n b2 = - ( (2*n*s1) - (s2*(n+3)) + (6*s02) )\n b3 = (4 * (n-1) * s1) - (2 * (n+1) * s2) + (8*s02)\n b4 = s1 - s2 + s02 \n varNumer = (b0 * self.y2Sum**2.0) + (b1 * self.y4Sum) + \\\n (b2 * self.ySum**2.0 * self.y2Sum) + \\\n (b3 * self.ySum * self.y3Sum) + \\\n (b4 * self.ySum**4.0)\n varDenom = (self.ySum**2 - self.y2Sum)**2.0 * \\\n (n * (n-1) * (n-2) * (n-3))\n self.expectedSquaredG = (varNumer * 1.0) / varDenom\n self.vg = self.expectedSquaredG - self.squareExpectedG\n\n #### Assure that Variance is Larger than Zero ####\n if NUM.isnan(self.vg) or self.vg <= 0.0:\n ARCPY.AddIDMessage(\"Error\", 906)\n raise SystemExit()\n\n self.standDev = NUM.sqrt(self.vg)\n self.zg = (self.gg - self.eg) / (self.standDev * 1.0)\n self.b0 = b0\n self.b1 = b1\n self.b2 = b2\n self.b3 = b3\n self.b4 = b4\n self.pVal = STATS.zProb(self.zg, type = 2)\n\n def report(self, fileName = None):\n \"\"\"Reports the General G results as a message or to a file. 
If\n self.displayIt is set to True, then an html graphical report is\n generated to your default temp directory.\n\n INPUTS:\n fileName {str, None}: path to a text file to populate with results.\n \"\"\"\n\n #### Create Output Text Table ####\n header = ARCPY.GetIDMessage(84159)\n ggString = LOCALE.format(\"%0.6f\", self.gg)\n egString = LOCALE.format(\"%0.6f\", self.eg)\n vgString = LOCALE.format(\"%0.6f\", self.vg) \n zgString = LOCALE.format(\"%0.6f\", self.zg) \n pvString = LOCALE.format(\"%0.6f\", self.pVal) \n row1 = [ ARCPY.GetIDMessage(84153), ggString]\n row2 = [ ARCPY.GetIDMessage(84154), egString]\n row3 = [ ARCPY.GetIDMessage(84150), vgString]\n row4 = [ ARCPY.GetIDMessage(84151), zgString]\n row5 = [ ARCPY.GetIDMessage(84152), pvString]\n results = [row1, row2, row3, row4, row5]\n outputTable = UTILS.outputTextTable(results, header = header,\n pad = 1)\n\n #### Add Linear/Angular Unit ####\n if self.wType in [0, 1, 7]:\n distanceOut = self.ssdo.distanceInfo.outputString\n dmsg = ARCPY.GetIDMessage(84344)\n distanceMeasuredStr = dmsg.format(distanceOut)\n outputTable += \"\\n%s\\n\" % distanceMeasuredStr\n\n #### Write/Report Text Output ####\n if fileName:\n f = UTILS.openFile(fileName, \"w\")\n f.write(outputTable)\n f.close()\n else:\n ARCPY.AddMessage(outputTable)\n \n #### Set Formatted Floats ####\n self.ggString = ggString\n self.egString = egString\n self.vgString = vgString\n self.zgString = zgString\n self.pvString = pvString\n \n return ggString, zgString, pvString\n\n def reportHTML(self, htmlFile = None):\n \"\"\"Generates a graphical html report for General G.\"\"\"\n\n #### Shorthand Attributes ####\n zg = self.zg\n\n #### Progress and Create HTML File Name ####\n writeMSG = ARCPY.GetIDMessage(84228)\n ARCPY.SetProgressor(\"default\", writeMSG)\n ARCPY.AddMessage(writeMSG)\n if not htmlFile:\n prefix = ARCPY.GetIDMessage(84242)\n outputDir = UTILS.returnScratchWorkSpace()\n baseDir = UTILS.getBaseFolder(outputDir)\n htmlFile = UTILS.returnScratchName(prefix, fileType = \"TEXT\", \n scratchWS = baseDir,\n extension = \"html\")\n\n #### Obtain Correct Images ####\n imageDir = UTILS.getImageDir()\n lowStr = ARCPY.GetIDMessage(84245)\n highStr = ARCPY.GetIDMessage(84246)\n if zg <= -2.58:\n imageFile = OS.path.join(imageDir, \"lowGGValues01.png\")\n info = (\"1%\", lowStr)\n imageBox = OS.path.join(imageDir, \"dispersedBox01.png\")\n elif (-2.58 < zg <= -1.96):\n imageFile = OS.path.join(imageDir, \"lowGGValues05.png\")\n info = (\"5%\", lowStr)\n imageBox = OS.path.join(imageDir, \"dispersedBox05.png\")\n elif (-1.96 < zg <= -1.65):\n imageFile = OS.path.join(imageDir, \"lowGGValues10.png\")\n info = (\"10%\", lowStr)\n imageBox = OS.path.join(imageDir, \"dispersedBox10.png\")\n elif (-1.65 < zg < 1.65):\n imageFile = OS.path.join(imageDir, \"randomGGValues.png\")\n imageBox = OS.path.join(imageDir, \"randomBox.png\")\n elif (1.65 <= zg < 1.96):\n imageFile = OS.path.join(imageDir, \"highGGValues10.png\")\n info = (\"10%\", highStr)\n imageBox = OS.path.join(imageDir, \"clusteredBox10.png\")\n elif (1.96 <= zg < 2.58):\n imageFile = OS.path.join(imageDir, \"highGGValues05.png\")\n info = (\"5%\", highStr)\n imageBox = OS.path.join(imageDir, \"clusteredBox05.png\")\n else:\n imageFile = OS.path.join(imageDir, \"highGGValues01.png\")\n info = (\"1%\", highStr)\n imageBox = OS.path.join(imageDir, \"clusteredBox01.png\")\n\n #### Footnote ####\n footStart = ARCPY.GetIDMessage(84230).format(zg)\n if abs(zg) >= 1.65:\n footEnd = ARCPY.GetIDMessage(84231)\n footEnd = 
footEnd.format(*info)\n footerText = footStart + footEnd \n else:\n footEnd = ARCPY.GetIDMessage(84232)\n footerText = footStart + footEnd\n\n #### Root Element ####\n title = ARCPY.GetIDMessage(84247)\n reportElement, reportTree = REPORT.xmlReport(title = title)\n\n #### Begin Graphic SubElement ####\n graphicElement = REPORT.xmlGraphic(reportElement, imageFile, \n footerText = footerText)\n\n #### Floating Table ####\n rowVals = [ [ARCPY.GetIDMessage(84153), self.ggString, \"\"],\n [ARCPY.GetIDMessage(84151), self.zgString, imageBox],\n [ARCPY.GetIDMessage(84152), self.pvString, \"\"] ]\n\n fTable = REPORT.xmlTable(graphicElement, rowVals, \n tType = \"ssFloat\")\n\n #### General G Table ####\n rowVals = [ [ARCPY.GetIDMessage(84153), self.ggString],\n [ARCPY.GetIDMessage(84154), self.egString],\n [ARCPY.GetIDMessage(84150), self.vgString],\n [ARCPY.GetIDMessage(84151), self.zgString],\n [ARCPY.GetIDMessage(84152), self.pvString] ]\n\n mTable = REPORT.xmlTable(reportElement, rowVals,\n title = ARCPY.GetIDMessage(84159))\n\n #### Dataset Table ####\n rowVals = [ [UTILS.addColon(ARCPY.GetIDMessage(84233)), \n self.ssdo.inputFC],\n [UTILS.addColon(ARCPY.GetIDMessage(84016)), \n self.varName],\n [UTILS.addColon(ARCPY.GetIDMessage(84234)), \n WU.wTypeDispatch[self.wType]],\n [UTILS.addColon(ARCPY.GetIDMessage(84235)),\n self.concept],\n [UTILS.addColon(ARCPY.GetIDMessage(84236)), \n str(self.rowStandard)],\n [UTILS.addColon(ARCPY.GetIDMessage(84237)), \n self.thresholdStr],\n [UTILS.addColon(ARCPY.GetIDMessage(84238)), \n str(self.weightsFile)],\n [UTILS.addColon(ARCPY.GetIDMessage(84418)),\n str(self.ssdo.selectionSet)] ]\n\n dTable = REPORT.xmlTable(reportElement, rowVals,\n title = ARCPY.GetIDMessage(84239))\n\n #### Create HTML ####\n html = REPORT.report2html(reportTree, htmlFile)\n ARCPY.AddMessage(htmlFile)\n\n return htmlFile\n\nif __name__ == \"__main__\":\n setupGeneralG()\n\n","sub_path":"Simple Map viewer/bin/Debug/ArcGISRuntime10.2.7/LocalServer32/ArcToolbox/Scripts/GeneralG.py","file_name":"GeneralG.py","file_ext":"py","file_size_in_byte":23688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"597847122","text":"__author__ = 'nnduc_000'\n\nfrom scrapy.contrib.spiders.crawl import CrawlSpider\nfrom scraper_app.items import JobData\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy import Request\n\n\nclass JobDataSpider(CrawlSpider):\n name = \"staff_point_fi\"\n allowed_domains = [\"staffpoint.fi\"]\n start_urls = [\"https://www.staffpoint.fi/avoimet-tyopaikat/\"]\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n items = []\n jobs = hxs.select(\"//tr\")\n\n for row in jobs:\n item = JobData()\n item['title'] = row.select(\"./td[1]/a/text()\").extract()\n item['link'] = row.select(\"./td[1]/a/@href\").extract()\n item['location'] = row.select(\"./td[6]/div/text()\").extract()\n \n\n # We need to specify where we fetch data from\n item['source'] = \"www.staffpoint.fi\"\n items.append(item)\n\n # it returns an empty first row, so we need to delete it before writing.\n items.remove(items[0])\n\n for item in items:\n item['title'] = item['title'][0].lower()\n item['link'] = \"https://www.staffpoint.fi\" + item['link'][0]\n item['location'] = item['location'][0]\n item['location'] = \" \".join(item['location'].split()).lower()\n # check if location is empty or not\n if item['location'] == \"\":\n item['location'] = \"find more about location on website\"\n\n # Now we are going to get the description of the 
job\n for item in items:\n request = Request(\"%s\" % item['link'], callback=self.description_parse)\n request.meta['item'] = item\n yield request\n\n def description_parse(self, response):\n item_list = []\n header = response.xpath(\"//div[@class='content_output']/p[1]/text()\").extract()\n header = header[0] + header[1]\n item = response.meta['item']\n item['description'] = header\n return item\n","sub_path":"spiders/Finland_spider/staff_point_fi.py","file_name":"staff_point_fi.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"30090304","text":"# code based on https://www.python-course.eu/tkinter_entry_widgets.php tutorial\nimport tkinter as tk\nfrom tkinter import messagebox\n\nfields = ('Nome', 'Título de Eleitor', 'Nome da mãe')\n\ndef fetch(entries):\n for key, entry in entries.items():\n text = entry.get()\n print('%s: \"%s\"' % (key, text)) \n\ndef clear_fields(entries):\n for _, entry in entries.items():\n entry.delete(0,tk.END)\n\ndef generate_document(entries):\n messagebox.showerror('Sem certidão', 'Não está quite com a justiça eleitoral!')\n\ndef makeform(root, fields):\n entries = {}\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=22, text=field+\": \", anchor='w')\n ent = tk.Entry(row)\n ent.insert(0,\"0\")\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries[field] = ent\n return entries\n \nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('', (lambda event, e=ents: fetch(e)))\n b1 = tk.Button(root, text='Limpar campos',\n command=(lambda e=ents: clear_fields(e)))\n b1.pack(side=tk.LEFT, padx=5, pady=5)\n b2 = tk.Button(root, text='Emitir certidão',\n command=(lambda e=ents: generate_document(e)))\n b2.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"520749314","text":"# Vendor\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nclass Database():\n \"\"\" Database singleton. 
\"\"\"\n __instance = None # type: SQLAlchemy\n\n @classmethod\n def get_instance(cls, app: Flask = None):\n assert app is None or isinstance(app, Flask), \"The type of app must be the Flask class.\"\n if cls.__instance is None:\n if app is None:\n raise Exception(\"The app variable during the instantiation must not be None.\")\n cls.__instance = SQLAlchemy(app)\n return cls.__instance\n","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"66029297","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-10-08\n# @Author : Anh Hoang (anhhoang.work.mail@gmail.com)\n# @Project : FSCognitive\n# @Version : 1.0\n\nfrom commons import Utilities\nimport cv2\nimport time\nimport glob\nimport os\n\nDEFAULT_FACES = 4\n\n\nclass FaceRecognizer:\n \"\"\"This class recognizes faces in camera and captures them\"\"\"\n\n def __init__(self, camera):\n \"\"\"Initialization\n\n Args:\n camera (int): index of camera to use\n \"\"\"\n self.cascade_file = Utilities.absolute_path(__file__, 'cascade.xml')\n self.face_cascade = cv2.CascadeClassifier(self.cascade_file)\n self.video_capture = cv2.VideoCapture(camera)\n self.video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)\n self.video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)\n self.faces_captured = 1\n\n def start_capturing(self, is_register):\n \"\"\"Start the camera, look for faces and write to file when faces are captured\n\n Args:\n is_register (bool): determine if the session is\n registering or identifying\n If this argument is true then camera will capture 3\n photos of user. Otherwise, only 1 photo is captured\n \"\"\"\n if is_register is False:\n self.faces_captured = DEFAULT_FACES - 1\n while self.faces_captured < DEFAULT_FACES:\n # Read frame from video capture\n return_code, frame = self.video_capture.read()\n # Use Haar cascade to detect faces in captured frame\n faces = self.face_cascade.detectMultiScale(\n cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.cv.CV_HAAR_SCALE_IMAGE\n )\n # Looping through captured faces and write to file\n for (x, y, w, h) in faces:\n # All captured images will be saved in tmp folder in\n # core/face_recognizer\n filePath = Utilities.absolute_path(\n __file__, 'tmp/face%d.jpg' % self.faces_captured)\n self.faces_captured += 1\n cv2.imwrite(filePath, frame)\n if is_register:\n # Sleep 2 seconds so user can change face orientation\n time.sleep(2)\n break\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n self.stop_capturing()\n break\n\n # cv2.imshow('Video', frame)\n\n self.stop_capturing()\n\n def stop_capturing(self):\n \"\"\"End camera session\"\"\"\n self.video_capture.release()\n cv2.destroyAllWindows()\n\n def captured_faces(self):\n \"\"\"Get all captured images\"\"\"\n dir = os.path.dirname(__file__)\n return glob.glob(os.path.join(dir, 'tmp') + \"/*.jpg\")\n","sub_path":"fscognitive/core/face_recognizer/face_recognizer.py","file_name":"face_recognizer.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"443363048","text":"\nimport os\nimport cobra\nimport numpy as np\n\nfrom cobra import Model, Reaction, Metabolite\nfrom Env_ball_class import Env_ball\n\n\nclass Ensemble_model:\n '''\n Create a model from a pool of reactions from related organisms.\n \n 1) Take a group of models\n 2) pool 
all of their \"rxn\" reactions\n 3) remove all exchange reactions\n 4) add all exchange reactions from the environmental ball \n 5) Even if all reactions are added, transporters are still necessary to convert met_e to met_c\n \n '''\n def __init__(self, label, path_to_models):\n self.path = path_to_models\n self.files = os.listdir(self.path)\n \n \n def clone_model_without_transp(self, model):\n mc = Model(model.id)\n \n for reaction in model.reactions:\n \n if 'EX_' in reaction.id and '_e' in reaction.id:\n pass\n else:\n for met in reaction.metabolites:\n if not mc.metabolites.has_id(met.id):\n mc.add_metabolites(met.copy())\n mc.add_reaction(reaction.copy())\n \n mc.reactions.bio1.objective_coefficient=1\n \n mc.optimize()\n return mc \n \n def make_ensemble(self):\n #open the first model\n model = cobra.io.read_sbml_model(os.path.join(self.path, self.files[0]))\n #join all non exchange reactopms from models\n \n \n mc= self.clone_model_without_transp(model)\n \n for i in self.files:\n mod= cobra.io.read_sbml_model(os.path.join(self.path, i))\n for reaction in mod.reactions:\n if 'rxn' in reaction.id:\n if not mc.reactions.has_id(reaction.id):\n mc.add_reaction(reaction.copy())\n mc.repair()\n return mc\n \n def add_transporter(self, model):\n \n \n ev = Env_ball(1000)\n \n for reaction in ev.transporters:\n met_id=reaction.replace('EX_','')\n met_name = ev.metabolites_d[met_id]\n react = Reaction(reaction)\n react.name = 'export of ' + met_name\n react.lower_bound = -1000. # This is the default\n react.upper_bound = 1000. # This is the default\n if not model.metabolites.has_id(met_id):\n m_e = Metabolite(met_id, name=met_name,compartment='e')\n react.add_metabolites({m_e: -1.0})\n model.add_reactions([react])\n else:\n react.add_metabolites({model.metabolites.get_by_id(met_id): -1.0})\n model.add_reactions([react])\n \n model.repair()\n model.optimize()\n \n def write_model(self, model, path):\n cobra.io.write_sbml_model(model,path) \n\n\n\n########### usage ##############\n \n#em = Ensemble_model('Actinomycetaceae', '/home/daniel/generative_models/models/vaginal_models/Actinomycetaceae/gapfilled')\n#ensembl = em.make_ensemble()\n#em.add_transporter(ensembl)\n#em.write_model(ensembl, '/home/daniel/generative_models/models/test_ensmbl.sbml')\n \n \n \n ","sub_path":"scripts/create_ensemble_model.py","file_name":"create_ensemble_model.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"498560098","text":"from datetime import datetime, timedelta\nimport lorem\nimport random\n\nfrom django.core.management.base import BaseCommand\n\nfrom kilo.models import Day, Workout\n\n\nclass Command(BaseCommand):\n @property\n def help(self):\n return '''\n Generate semi-realistic random workouts.\n '''\n\n def add_arguments(self, parser):\n parser.add_argument('start', help=\"Approximate YYYY-MM-DD date to start generating workouts\")\n parser.add_argument('end', help=\"Approximate YYYY-MM-DD date to finish generating workouts\")\n\n def handle(self, *args, **options):\n start = self._parse_date(options['start'])\n end = self._parse_date(options['end'])\n\n today = end\n while today >= start:\n today -= timedelta(days=random.choice([1, 1, 2]))\n if Day.objects.filter(day=today).exists():\n print(f\"Skipping {today} because it already exists\")\n continue\n day = Day(day=today, notes=lorem.sentence())\n day.save()\n try:\n workout = random.choice([\n self._generate_erg,\n self._generate_erg,\n 
self._generate_run,\n self._generate_run,\n self._generate_other,\n ])(day)\n workout.save()\n except Exception:\n day.delete()\n print(f\"Saved day: {day.day}: {workout.activity}\")\n\n def _parse_date(self, date_str):\n try:\n return datetime.strptime(date_str, \"%Y-%m-%d\").date()\n except ValueError:\n print(f\"Bad date {date_str}\")\n exit(1)\n\n def _generate_erg(self, day):\n distance = random.choice([2, 6])\n time = random.randint(470, 490) if distance == 2 else random.randint(1480, 1550)\n return Workout(\n day=day,\n activity='erging',\n distance_unit=Workout.KILOMETERS,\n distance=distance,\n seconds=time,\n )\n\n def _generate_run(self, day):\n distance = random.choice([4, 4, 5, 5, 10])\n pace = random.randint(480, 600)\n return Workout(\n day=day,\n activity='running',\n distance_unit=Workout.MILES,\n distance=distance,\n seconds=distance * pace,\n )\n\n def _generate_other(self, day):\n return Workout(\n day=day,\n activity=random.choice(['crossfit', 'stairs']),\n )\n","sub_path":"kilo/management/commands/generate_workouts.py","file_name":"generate_workouts.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"459826153","text":"import sys, os\nfilePath = os.environ[\"RAI_ROOT\"]\nsys.path.insert(0, filePath + '/RAI/include/rai/function/tensorflow/pythonProtobufGenerators')\n\nimport sys\nimport tensorflow as tf\nimport functions\nimport graph_structure\nimport core\nimport os\n\n# arguments\ndtype = int(sys.argv[1])\nsaving_dir = sys.argv[2]\ncomputeMode = sys.argv[3]\nfn_type = sys.argv[4]\ngs_type = sys.argv[5]\ngs_arg = sys.argv[6:]\n\n__import__(gs_type)\ngs = sys.modules[gs_type]\n\n__import__(fn_type)\nfn = sys.modules[fn_type]\nprint(fn)\n\ngs_method = getattr(gs, gs_type)\nfn_method = getattr(fn, fn_type)\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nsession = tf.Session(config=config)\n\n# Device Configuration\nGPU_mode, Dev_list = core.dev_config(computeMode)\n\n\nwith tf.device(Dev_list[0]): # Base device(cpu mode: cpu0, gpu mode: first gpu on the list)\n gs_ob = gs_method(dtype, *gs_arg, fn=fn_method)\n fn_ob = fn_method(dtype, gs_ob)\n\nfile_name = fn_type + '_' + gs_type + '.pb'\ninitialize_all_variables_op = tf.variables_initializer(tf.global_variables(), name='initializeAllVariables')\ntf.train.write_graph(session.graph_def, saving_dir, file_name, as_text=False)\n","sub_path":"applications/DIY/proto/protobufGenerator.py","file_name":"protobufGenerator.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"630172259","text":"n = int(input())\n\nif n==0:\n print(0)\nelif n==1:\n print(1)\nelse:\n i=0\n f1=1\n f0=0\n while(i<=n-2):\n temp = f1\n f1=f1+f0\n f0=temp\n i+=1\n print(f1)\n","sub_path":"피보나치수2.py","file_name":"피보나치수2.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"576961280","text":"''' Sorry no comments :).\n'''\nimport Goban \nimport importlib\nimport time\nfrom io import StringIO\nimport sys\nfrom multiprocessing import Pool\n\ndef fileorpackage(name):\n if name.endswith(\".py\"):\n return name[:-3]\n return name\n \n\nclassNames = ['MCTSAgent','MCTSAgent2','abPlayer','Montecarlo'\n ,'randomPlayer', 'myPlayer','gnugoPlayer']\n\n\nsysstdout= sys.stdout\nstringio = StringIO()\ndef play_game(pl1, pl2):\n b = Goban.Board()\n outputs = 
[\"\",\"\"]\n players = []\n player1class = importlib.import_module(classNames[pl1])\n player1 = player1class.myPlayer()\n player1.newGame(Goban.Board._BLACK)\n players.append(player1)\n\n player2class = importlib.import_module(classNames[pl2])\n player2 = player2class.myPlayer()\n player2.newGame(Goban.Board._WHITE)\n players.append(player2)\n\n totalTime = [0,0] # total real time for each player\n nextplayer = 0\n nextplayercolor = Goban.Board._BLACK\n nbmoves = 1\n wrongmovefrom = 0\n\n while not b.is_game_over():\n # print(\"Referee Board:\")\n # b.prettyPrint() \n # print(\"Before move\", nbmoves)\n legals = b.legal_moves() # legal moves are given as internal (flat) coordinates, not A1, A2, ...\n # print(\"Legal Moves: \", [b.move_to_str(m) for m in legals]) # I have to use this wrapper if I want to print them\n nbmoves += 1\n otherplayer = (nextplayer + 1) % 2\n othercolor = Goban.Board.flip(nextplayercolor)\n \n currentTime = time.time()\n sys.stdout = stringio\n move = players[nextplayer].getPlayerMove() # The move must be given by \"A1\", ... \"J8\" string coordinates (not as an internal move)\n sys.stdout = sysstdout\n playeroutput = stringio.getvalue()\n stringio.truncate(0)\n stringio.seek(0)\n # print((\"[Player \"+str(nextplayer) + \"] \").join(playeroutput.splitlines(True)))\n outputs[nextplayer] += playeroutput\n totalTime[nextplayer] += time.time() - currentTime\n # print(\"Player \", nextplayercolor, players[nextplayer].getPlayerName(), \"plays: \" + move) #changed \n\n if not Goban.Board.name_to_flat(move) in legals:\n print(otherplayer, nextplayer, nextplayercolor)\n print(\"Problem: illegal move\")\n wrongmovefrom = nextplayercolor\n break\n b.push(Goban.Board.name_to_flat(move)) # Here I have to internally flatten the move to be able to check it.\n players[otherplayer].playOpponentMove(move)\n \n nextplayer = otherplayer\n nextplayercolor = othercolor\n\n print(\"The game is over\")\n # b.prettyPrint()\n result = b.result()\n print(\"Time:\", totalTime)\n print(\"GO Score:\", b.final_go_score())\n print(\"Winner: \", end=\"\")\n if wrongmovefrom > 0:\n if wrongmovefrom == b._WHITE:\n print(\"BLACK\")\n return 0,-1\n elif wrongmovefrom == b._BLACK:\n print(\"WHITE\")\n return -1,0\n else:\n print(\"ERROR\")\n return -1,-1\n elif result == \"1-0\":\n print(\"WHITE\")\n return 0,1\n elif result == \"0-1\":\n print(\"BLACK\")\n return 1,0\n else:\n print(\"DEUCE\")\n return 0,0\n\ndef play_ten(pl1,pl2):\n \"\"\" pl1=pls[0]\n pl2=pls[1] \"\"\"\n results = [0,0]\n for i in range(10):\n b,w = play_game(pl1,pl2)\n results[0]+=b\n results[1]+=w\n results[0]/=10\n results[1]/=10\n return results\n\nif __name__==\"__main__\":\n results=[]\n with Pool(processes=3) as pool:\n results = pool.starmap(play_ten, [(0,0),(1,1),(2,2),(3,3)])\n \n for i in range(len(results)):\n print(results[i])\n","sub_path":"Go/Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"180916013","text":"import numpy as np\nfrom datetime import datetime as dt\nfrom random import randint\n\ndef get_most_color(weighted_colors):\n rgb_cube = group_colors_by_cube(weighted_colors)\n start = dt.now()\n weight = 0\n color = None\n for i in range(3):\n for j in range(3):\n for k in range(3):\n tmp_weight = len(rgb_cube[i][j][k])\n if tmp_weight > weight:\n weight = tmp_weight\n npa_rgbs = np.array(rgb_cube[i][j][k])\n ave_rgb = list(np.average(npa_rgbs, axis=0))\n color = list(map(int, 
ave_rgb))\n end = dt.now()\n return color\n\n\ndef group_colors_by_cube(weighted_colors):\n \"\"\"\n group colors into a 3*3*3 cube,\n the three axes stand for r, g, b,\n partitioned by value like\n [0,85) -> 0, [85,170) -> 1, [170, 256) -> 2\n\n :param weighted_colors: [(weight, (r, g, b)), ...]\n :return:\n \"\"\"\n\n rgb_cube = [\n [[[], [], []], [[], [], []], [[], [], []]],\n [[[], [], []], [[], [], []], [[], [], []]],\n [[[], [], []], [[], [], []], [[], [], []]]\n ]\n\n for weighted_color in weighted_colors:\n weight = weighted_color[0]\n color_rgb = weighted_color[1]\n x, y, z = __locate_color_by_cube(color_rgb)\n for w in range(weight):\n rgb_cube[x][y][z].append(list(color_rgb))\n return rgb_cube\n\n\ndef __locate_color_by_cube(color_rgb):\n x = y = z = None\n location = [x, y, z]\n for i in range(3):\n elem = color_rgb[i]\n if elem < 85:\n location[i] = 0\n elif 85 <= elem < 170:\n location[i] = 1\n else:\n location[i] = 2\n\n return location\n\n\n# ==================================================================\n\ndef k_means(k, weighted_colors):\n # the k cluster centers as a 2-D array\n clusters_arr = __init_clusters(k, weighted_colors, True)\n # 2-D array of all colors\n # unpacked as [weight, r, g, b]\n colors_arr = __tile_weighted_colors(weighted_colors)\n\n palette = []\n weights = []\n\n recursion_times = 0\n old_offset_sum = None\n new_offset_sum = None\n\n while new_offset_sum is None \\\n or old_offset_sum is None \\\n or (new_offset_sum / old_offset_sum < 0.99 and recursion_times < 10):\n if new_offset_sum is not None and old_offset_sum is not None:\n print(\"Recursion {0} gets an optimization rate: {1}\".format(\n recursion_times, new_offset_sum / old_offset_sum\n ))\n\n start = dt.now()\n old_offset_sum = new_offset_sum\n new_offset_sum = 0\n\n palette = []\n weights = []\n\n clusted_colors = []\n for _ in range(k):\n clusted_colors.append(np.array([]))\n\n for color_arr in colors_arr:\n tile_color_arr = np.tile(color_arr[1:], (k, 1))\n tmp_arr = (tile_color_arr - clusters_arr) ** 2\n tmp_distance_arr = np.sqrt(np.sum(tmp_arr, axis=1))\n nearest_cluster_index = tmp_distance_arr.argmin()\n clusted_colors[nearest_cluster_index] = np.append(clusted_colors[nearest_cluster_index], color_arr)\n\n for i in range(k):\n # fold into palette & weights\n clusted_colors[i] = clusted_colors[i].reshape(len(clusted_colors[i]) // 4, 4)\n tmp_arr = clusted_colors[i].copy()\n tmp_weights = np.sum(tmp_arr[:, 0])\n for j in range(3):\n tmp_arr[:, 1+j] = tmp_arr[:, 1+j] * tmp_arr[:, 0]\n average_rgb_i = (np.sum(tmp_arr[:, 1:], axis=0) / tmp_weights).tolist()\n # average_rgb_i = np.average(clusted_colors[i][:, 1:] * clusted_colors[i][:, 0], axis=0).tolist()\n average_rgb_i = list(map(int, average_rgb_i))\n palette.append(average_rgb_i)\n weights.append(tmp_weights)\n\n # use the newly formed palette as the clusters and compute the offset\n palette_arr = np.array(palette)\n for i in range(k):\n ave_color_arr = palette_arr[i]\n clusted_color_arr = clusted_colors[i]\n tile_ave_arr = np.tile(ave_color_arr, (len(clusted_color_arr), 1))\n tmp_arr = (tile_ave_arr - clusted_color_arr[:, 1:]) ** 2\n tmp_distance_arr = np.sqrt(np.sum(tmp_arr, axis=1))\n new_offset_sum += np.sum(tmp_distance_arr)\n\n end = dt.now()\n print(\"K-MEANS recursion {0} time, takes {1}\".format(recursion_times + 1, end - start))\n\n clusters_arr = palette_arr.copy()\n recursion_times += 1\n\n print(\"Stop Recursion by an Optimization Rate: {}\".format(new_offset_sum / old_offset_sum))\n print()\n\n return palette, weights\n\n
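# Example usage (illustrative values; weights are integer counts):\n#   weighted_colors = [(3, (250, 10, 10)), (1, (12, 240, 8)), (2, (10, 12, 245))]\n#   palette, weights = k_means(2, weighted_colors)\n#   dominant = get_most_color(weighted_colors)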
\ndef __init_clusters(k, weighted_colors, random=False):\n clusters = []\n length = len(weighted_colors)\n if random:\n for i in range(k):\n r = randint(0, length - 1)  # randint is inclusive on both ends\n cluster = weighted_colors[r][1]\n clusters.append(cluster)\n else:\n for i in range(k):\n clusters.append(list(weighted_colors[i * length // k][1]))\n return np.array(clusters)\n\n\ndef __tile_weighted_colors(weighted_colors):\n colors = []\n for weight, color in weighted_colors:\n colors.append([weight, *color])\n return np.array(colors)\n","sub_path":"hsk/alg.py","file_name":"alg.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"231587667","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 21 19:56:30 2020\r\n\r\n@author: CEC\r\n\"\"\"\r\n\r\ndef isYearLeap(year):\r\n if year % 400 == 0:\r\n return True \r\n elif year % 4 == 0 and year % 100 != 0:\r\n return True\r\n else:\r\n return False\r\n \r\ntestData= [1900, 2000, 2016, 1987]\r\ntestResults = [False, True, True, False]\r\n\r\nfor i in range (len(testData)):\r\n yr= testData[i]\r\n print (yr, \"->\", end = \"\")\r\n result= isYearLeap(yr)\r\n if result == testResults[i]:\r\n print(\"OK\")\r\n else: \r\n print(\"Failed\")","sub_path":"RetoAñoBisiesto.py","file_name":"RetoAñoBisiesto.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"60722257","text":"import sys\nimport cv2\nimport numpy as np\n\n# Load input image\n\nin_file = sys.argv[1]\nimage = cv2.imread(in_file)\ncv2.imshow('Input image', image)\n\nimage_gray1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nimage_gray2 = np.float32(image_gray1)\n\n# Harris corner detector\nimage_harris1 = cv2.cornerHarris(image_gray2, 7, 5, 0.04)\n\n# Resultant image is dilated to mark the corners\nimage_harris2 = cv2.dilate(image_harris1, None)\n\n# Threshold the image\nimage[image_harris2 > 0.01 * image_harris2.max()] = [0, 0, 0]\n\ncv2.imshow('Harris Corners', image)\ncv2.waitKey()\n","sub_path":"Chapter05/Detecting_corner.py","file_name":"Detecting_corner.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"27903802","text":"mystr = \"\"\"\ndoublekills\ntriplekills\nquadrakills\npentakills\nfirstblood\nfirstbloodkill\nfirstbloodassist\nfirstbloodvictim\n\"\"\"\n\nmystr = mystr.split(\"\\n\")\n\nfor s in mystr:\n print(f'\"{s.strip()}\",')\n\n\n# goldat15\n# xpat15\n# csat15\n# opp_goldat15\n# opp_xpat15\n# opp_csat15\n# killsat15\n# assistsat15\n# deathsat15\n# opp_killsat15\n# opp_assistsat15\n# opp_deathsat15\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"654297281","text":"list = []\nlist_item = int(input(\"Enter no of item you want in list :- \"))\ni = 1\nfor i in range(i,list_item+1):\n item = int(input(f\"Enter {i} item :- \"))\n list.append(item)\nprint(list)\nlist.reverse()\nprint(list)\nlist[::-1]\nprint(list)\n\nfor i in range (int(len(list)/2)):\n temp = list[i]\n list[i] = list[len(list)-1-i]\n list[len(list)-1-i] = temp\n\nprint(list)","sub_path":"practice_prob3.py","file_name":"practice_prob3.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"95881177","text":"from sklearn.linear_model import LogisticRegression\nimport pickle\nimport pandas as pd\n\n
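# Each line of the *.feature.txt files is expected to look like\n# \"label,f1,f2,...\": an integer class label followed by float features.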
\ndef load(path):\n train_X, train_y = [], []\n with open(path, \"r\") as f:\n data = f.readlines()\n for line in data:\n line = line.strip().split(\",\")\n train_X.append(list(map(float, line[1:])))\n train_y.append(int(line[0]))\n return train_X, train_y\n\ntrain_X, train_y = load(\"./data/train.feature.txt\")\ntest_X, test_y = load(\"./data/test.feature.txt\")\n\nwith open(\"bin/lr.model\", \"rb\") as f:\n lr = pickle.load(f)\n\ndef predict(x, lr):\n out = lr.predict_proba(x)\n preds = out.argmax(axis=1)\n probs = out.max(axis=1)\n return preds, probs\n\ndef accuracy(lr, xs, ts):\n ys = lr.predict(xs)\n return (ys == ts).mean()\n\nprint(\"train acc: {}, test acc: {}\".format(accuracy(lr, train_X, train_y), accuracy(lr, test_X, test_y)))\n# train acc: 0.9478659678023212, test acc: 0.9191616766467066\n","sub_path":"seiichi/chapter06/54.py","file_name":"54.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"93829245","text":"import os\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\n\n\ndb = SQLAlchemy()\ntoolbar = DebugToolbarExtension()\ncors = CORS()\nmigrate = Migrate()\n\n\ndef create_app():\n app = Flask(__name__)\n\n app_settings = os.getenv('APP_SETTINGS')\n app.config.from_object(app_settings)\n\n db.init_app(app)\n toolbar.init_app(app)\n cors.init_app(app)\n migrate.init_app(app, db)\n\n from project.api.users import users_blueprint\n app.register_blueprint(users_blueprint)\n\n @app.shell_context_processor\n def cxt():\n return {\n 'app': app,\n 'db': db\n }\n\n return app\n","sub_path":"services/users/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"70381851","text":"class Tag:\r\n def __init__(self, tag, klass=None, is_single=False, **kwargs):\r\n self.tag = tag\r\n self.text = \"\"\r\n self.attributes = {}\r\n self.is_single = is_single\r\n self.children = []\r\n self.indn = \"\"\r\n\r\n if klass is not None:\r\n self.attributes[\"class\"] = \" \".join(klass)\r\n\r\n for attr, value in kwargs.items():\r\n self.attributes[attr] = value\r\n\r\n def __enter__(self):\r\n self.indn += \"\\t\"\r\n return self\r\n\r\n def __exit__(self, type, value, traceback):\r\n self.text = self.AddTag()\r\n\r\n def __add__(self, other):\r\n other.indn = self.indn + \"\\t\"\r\n self.children.append(other)\r\n return self\r\n\r\n def __iadd__(self, other):\r\n other.indn = self.indn + \"\\t\"\r\n self.children.append(other)\r\n return self\r\n\r\n def AddTag(self):\r\n attrs = \"\"\r\n for attribute, value in self.attributes.items():\r\n attrs += (f' {attribute}=\"{value}\"')\r\n\r\n if self.children:\r\n opening = f\"{self.indn}<{self.tag}{attrs}>\\n\"\r\n\r\n internal = f\"{self.text}\"\r\n for child in self.children:\r\n internal += child.text\r\n ending = f\"{self.indn}</{self.tag}>\\n\"\r\n return opening + internal + ending\r\n else:\r\n if self.is_single:\r\n return f\"{self.indn}<{self.tag}{attrs}/>\\n\"\r\n else:\r\n return f\"{self.indn}<{self.tag}{attrs}>{self.text}</{self.tag}>\\n\"\r\n\r\n\r\nclass TopLevelTag(Tag):\r\n \"\"\"\r\n In fact, the difference between TopLevelTag and Tag comes down to three points:\r\n - TopLevelTag objects most likely contain no inner text;\r\n - They are always paired;\r\n - It must be possible to set attributes on Tag, but in this assignment, for 
данном задании для \r\n TopLevelTag это необязательное условие.\r\n\r\n Первый учитывать нельзя, так как \"скорее всего\", что ничего не гарантирует\r\n Второй и так учитывается в Tag так как он может быть и парным в том числе\r\n Третий \"необязательное условие\", так что тоже учитываем.\r\n\r\n Таким образом, классы получаются эквивалентны и нет смысла дублировать \r\n определение функции AddTag для экономии одного if (для второго условия).\r\n\r\n Так как по условию у нас все-так должно быть три класса, \r\n оставил его как наследника Tag, но пустым.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass HTML(Tag):\r\n def __init__(self, output):\r\n self.indn = \"\"\r\n self.text = \"\"\r\n self.children = []\r\n self.output = outputter1(output)\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def AddTag(self):\r\n if self.children:\r\n for child in self.children:\r\n self.text += child.text\r\n return (f\"\\n{self.text}\")\r\n\r\n def __exit__(self, type, value, traceback):\r\n self.text = self.AddTag()\r\n self.output(self.text)\r\n\r\n\r\ndef outputter1(outputtype):\r\n if outputtype:\r\n\r\n def outputter2(string):\r\n with open(outputtype, \"w\", encoding=\"utf8\") as x:\r\n x.write(string)\r\n print(f\"file {outputtype} is created\")\r\n\r\n return outputter2\r\n else:\r\n\r\n def outputter2(string):\r\n print(string)\r\n\r\n return outputter2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n Если output = None, будет вывод на экран.\r\n Или вместо None можно указать имя файла\r\n \"\"\"\r\n with HTML(output=None) as doc:\r\n with TopLevelTag(\"head\") as head:\r\n with Tag(\"title\") as title:\r\n title.text = \"hello\"\r\n head += title\r\n doc += head\r\n\r\n with TopLevelTag(\"body\") as body:\r\n with Tag(\"h1\", klass=(\"main-text\", )) as h1:\r\n h1.text = \"Test\"\r\n body += h1\r\n\r\n with Tag(\"div\", klass=(\"container\", \"container-fluid\"), id=\"lead\") as div:\r\n body += div\r\n with Tag(\"p\") as paragraph:\r\n paragraph.text = \"another test\"\r\n div += paragraph\r\n\r\n with Tag(\"img\", is_single=True, src=\"/icon.png\") as img:\r\n div += img\r\n\r\n doc += body\r\n","sub_path":"b3.py","file_name":"b3.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"643402889","text":"\nimport sys\nsys.setrecursionlimit(10**6)\n\n\ndef input(): return sys.stdin.readline().rstrip()\n\n# 현재 인덱스와, 현재까지의 합\n\n\nMax_idx = 0 # len numbers\nTarget = 0\nAns = 0\nNumbers = []\n\n\ndef go(idx, now):\n global Ans\n # 인덱스가 차면 체크후 되면 ++\n if idx == Max_idx:\n if now == Target:\n return 1\n return 0\n # 인덱스가 안찬경우 -> +,- 로 각각 재귀호출\n return go(idx+1, now+Numbers[idx])+go(idx+1, now-Numbers[idx])\n\n\ndef solution(numbers, target):\n global Max_idx, Target, Ans, Numbers\n Max_idx = len(numbers)\n Numbers = numbers\n Target = target\n return go(0, 0)\n\n\nsolution([1, 1, 1, 1, 1], 3)\nprint(Ans)\n\n\n# def solution(numbers, target):\n# if not numbers and target == 0:\n# return 1\n# elif not numbers:\n# return 0\n# else:\n# return solution(numbers[1:], target-numbers[0]) + solution(numbers[1:], target+numbers[0])\n","sub_path":"Programmers/etc/43165.py","file_name":"43165.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"51977508","text":"from hippiehug.Chain import Chain, Block, DocChain\n\n\ndef test_block_hash():\n store = {}\n b0 = Block( [b\"item1\", b\"item2\"], 0, [b\"A\", b\"B\"])\n store[b0.hid] 
= b0\n\n b1 = b0.next_block(store, [b\"item3\", b\"item4\"])\n\ndef test_block_duplicated():\n b0 = Block(items=[b\"item1\", b\"item2\"], index=0, fingers=[b\"A\", b\"B\"], aux=42)\n assert b0.aux == 42\n\n b1 = Block(b0.items, b0.index, b0.fingers, b0.aux)\n\n assert b0.items == b1.items\n assert b0.index == b1.index\n assert b0.fingers == b1.fingers\n assert b0.aux == b1.aux\n\ndef test_block_constructor_independence():\n items, fingers, aux = [], [], []\n b0 = Block(items, 0, fingers, aux)\n items.append(1)\n fingers.append(1)\n aux.append(1)\n assert b0.items == []\n assert b0.fingers == []\n assert b0.aux == []\n\n\ndef test_block_find():\n store = {}\n b0 = Block( [b\"item1\", b\"item2\"], 0, [])\n store[b0.hid] = b0\n\n for i in range(1, 99):\n item = list(map(lambda x: x.encode(),[ \"%s|%s\" % (i,j) for j in range(100) ]))\n assert len(item) == 100\n b0 = b0.next_block(store, item)\n\n res1 = b0.get_item(store, 50, 30)\n assert res1 == b\"50|30\"\n assert b0.get_item(store, 0, 1) == b\"item2\"\n\ndef test_chain():\n vals = []\n c = Chain()\n for i in range(0, 99):\n vals += [ (i,j,(\"%s|%s\" % (i,j)).encode()) for j in range(100)]\n\n items = list(map(lambda x: x.encode(),[ \"%s|%s\" % (i,j) for j in range(100) ]))\n c.multi_add(items)\n\n res1 = c.get(50, 30)\n assert res1 == b\"50|30\"\n assert c.get(0, 1) == b\"0|1\"\n\n for i, j, v in vals:\n assert c.get(i, j) == v\n\ndef test_chain_evidence():\n c = Chain()\n for i in range(0, 99):\n items = list(map(lambda x: x.encode(),[ \"%s|%s\" % (i,j) for j in range(100) ]))\n c.multi_add(items)\n\n evidence = {}\n res1 = c.get(50, 30, evidence)\n\n c2 = Chain(evidence, root_hash = c.head)\n assert c2.get(50, 30) == b\"50|30\"\n\ndef test_chain_doc():\n c = DocChain()\n for i in range(0, 99):\n items = list(map(lambda x: x.encode(),[ \"%s|%s\" % (i,j) for j in range(100) ]))\n c.multi_add(items)\n\n evidence = {}\n res1 = c.get(50, 30, evidence)\n\n c2 = DocChain(evidence, root_hash = c.head)\n assert c2.get(50, 30) == b\"50|30\"\n assert c2.check(c.root(), 50, 30, b\"50|30\")\n\nimport pytest\n\ndef test_chain_negative():\n c = Chain()\n for i in range(0, 99):\n items = list(map(lambda x: x.encode(),[ \"%s|%s\" % (i,j) for j in range(100) ]))\n c.multi_add(items)\n\n with pytest.raises(Exception) as IX:\n assert c.get(150, 30) == b\"50|30\"\n\n with pytest.raises(Exception) as IX:\n assert c.get(50, 130) == b\"50|30\"\n\ndef test_chain_pre_commit():\n c = Chain()\n items = [\"main\"]\n\n def add_aux_data(block):\n block.aux = [\"aux\"]\n\n c.multi_add(items, pre_commit_fn=add_aux_data)\n block = c.store[c.head]\n\n assert block.items == [\"main\"]\n assert block.aux == [\"aux\"]\n assert block.hash() in c.store\n\ndef test_chain_default_store():\n c = Chain()\n c.multi_add([\"test\"])\n assert c.get(0, 0) is not None\n\n c2 = Chain()\n assert c2.get(0, 0) is None\n\ndef test_chain_empty_store():\n store = {}\n c = Chain(store)\n c.multi_add([\"test\"])\n assert c.get(0, 0) == \"test\"\n\n c2 = Chain(store, root_hash=c.head)\n assert c2.get(0, 0) == \"test\"\n\n","sub_path":"hippiehug-package/tests/test_chain.py","file_name":"test_chain.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"625646047","text":"from datetime import timedelta\r\nfrom typing import Optional\r\nfrom fastapi import FastAPI\r\nfrom pydantic import BaseModel\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom 
sklearn.preprocessing import MinMaxScaler\r\nimport joblib\r\n\r\napp = FastAPI()\r\nmodel_info = joblib.load(\"./model_info.sav\")\r\n\r\nclass Appraisal(BaseModel):\r\n town: str\r\n build_area_adj: float\r\n park_area: float\r\n house_year: float\r\n trans_floor: int\r\n total_floor: int\r\n x: float\r\n y: float\r\n\r\n@app.post(\"/find-similar-1model/\")\r\ndef get_top6_case(appraisal:Appraisal):\r\n \"\"\"find 6 similar house\"\"\"\r\n\r\n # used for post-preprocessing\r\n kneighbors = 100\r\n town = appraisal.town\r\n X_test = [[appraisal.build_area_adj, appraisal.park_area, appraisal.house_year, appraisal.trans_floor, appraisal.total_floor, appraisal.x, appraisal.y]]\r\n \r\n \r\n # read model info\r\n knn_model = model_info[\"model\"]\r\n scaler = model_info[\"scaler\"]\r\n y_train = model_info[\"seq\"]\r\n price_std_dict = model_info[\"price_std\"]\r\n\r\n # Apply minmaxScaler\r\n X_test_norm = scaler.transform(X_test)\r\n # Get top-100 recommeded house id\r\n neighbors_id = knn_model.kneighbors(X_test_norm, kneighbors, False)\r\n\r\n # mapping neighbors_id to 1D array\r\n # get top100 seq_no\r\n seq_no = y_train[neighbors_id[0]] \r\n\r\n \"\"\"post-processing to get top6 case from raw dataframe\"\"\"\r\n\r\n # Keep the order same as top 100 results\r\n trade_data = model_info[\"trade_data\"]\r\n top100 = trade_data.iloc[pd.Index(trade_data['seq_no']).get_indexer(seq_no)]\r\n\r\n level_price = top100.iloc[0].build_u_price_adj\r\n price_filter = model_info[\"price_filter\"]\r\n\r\n # Filter ± town std of the first reco house price from top-100 reco data\r\n if price_filter == \"town_std\":\r\n town = top100.iloc[0].town\r\n \r\n # if appraisal town isn't seen in the training data\r\n # then we give it 5w as town std\r\n if town not in price_std_dict.keys():\r\n std = 5\r\n\r\n # if town std is nan\r\n # then we give it 5w as town std\r\n else:\r\n std = 5 if np.isnan(price_std_dict[town]) else price_std_dict[town]\r\n new_topn = top100[(top100.build_u_price_adj <= level_price + std) & (top100.build_u_price_adj >= level_price - std)]\r\n\r\n elif price_filter == \"none\":\r\n new_topn = top100\r\n\r\n else:\r\n price_filter = float(price_filter)\r\n # Filter (ex. ±7.5w) of the first reco house price from top-100 reco data\r\n if price_filter >= 1:\r\n new_topn = top100[(top100.build_u_price_adj <= level_price + price_filter) & (top100.build_u_price_adj >= level_price - price_filter)]\r\n \r\n # Filter (ex. ±10%) of the first reco house price from top-100 reco data\r\n if price_filter > 0 and price_filter < 1:\r\n new_topn = top100[(top100.build_u_price_adj <= level_price * (1 + price_filter)) & (top100.build_u_price_adj >= level_price * (1 - price_filter))]\r\n\r\n \r\n # Select top-6 houses as final reco\r\n new_reco6 = new_topn[[\"seq_no\",\r\n \"build_u_price_adj\",\r\n \"town\",\r\n \"build_type\",\r\n \"build_area_adj\",\r\n \"park_area\",\r\n \"house_year\",\r\n \"trans_floor\",\r\n \"total_floor\",\r\n \"x\",\r\n \"y\"]][:6]\r\n\r\n return {\"detail\":new_reco6.to_dict(\"records\")}","sub_path":"find_similar_1model.py","file_name":"find_similar_1model.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"197285241","text":"# Copyright 2013 Google Inc. 
All Rights Reserved.\n\n\"\"\"Creates a new Cloud SQL instance.\"\"\"\nfrom apiclient import errors\n\nfrom googlecloudsdk.calliope import arg_parsers\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.core import resources\nfrom googlecloudsdk.sql import util\n\n\nclass Create(base.Command):\n \"\"\"Creates a new Cloud SQL instance.\"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command.\n\n Please add arguments in alphabetical order except for no- or a clear-\n pair for that argument which can follow the argument itself.\n Args:\n parser: An argparse parser that you can use it to add arguments that go\n on the command line after this command. Positional arguments are\n allowed.\n \"\"\"\n parser.add_argument(\n '--activation-policy',\n required=False,\n choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],\n default='ON_DEMAND',\n help='The activation policy for this instance. This specifies when the '\n 'instance should be activated and is applicable only when the '\n 'instance state is RUNNABLE.')\n parser.add_argument(\n '--assign-ip',\n required=False,\n action='store_true',\n help='Specified if the instance must be assigned an IP address.')\n parser.add_argument(\n '--authorized-gae-apps',\n required=False,\n nargs='+',\n type=str,\n default=[],\n help='List of App Engine app IDs that can access this instance.')\n parser.add_argument(\n '--authorized-networks',\n required=False,\n nargs='+',\n type=str,\n default=[],\n help='The list of external networks that are allowed to connect to the'\n ' instance. Specified in CIDR notation, also known as \\'slash\\' '\n 'notation (e.g. 192.168.100.0/24).')\n parser.add_argument(\n '--backup-start-time',\n required=False,\n help='The start time of daily backups, specified in the 24 hour format '\n '- HH:MM, in the UTC timezone.')\n parser.add_argument(\n '--no-backup',\n required=False,\n action='store_true',\n help='Specified if daily backup should be disabled.')\n parser.add_argument(\n '--database-version',\n required=False,\n choices=['MYSQL_5_5', 'MYSQL_5_6'],\n default='MYSQL_5_5',\n help='The database engine type and version. Can be MYSQL_5_5 or '\n 'MYSQL_5_6.')\n parser.add_argument(\n '--enable-bin-log',\n required=False,\n action='store_true',\n help='Specified if binary log should be enabled. If backup '\n 'configuration is disabled, binary log must be disabled as well.')\n parser.add_argument(\n '--follow-gae-app',\n required=False,\n help='The App Engine app this instance should follow. It must be in '\n 'the same region as the instance.')\n parser.add_argument(\n '--gce-zone',\n required=False,\n help='The preferred Compute Engine zone (e.g. us-central1-a, '\n 'us-central1-b, etc.).')\n parser.add_argument(\n 'instance',\n help='Cloud SQL instance ID.')\n parser.add_argument(\n '--pricing-plan',\n '-p',\n required=False,\n choices=['PER_USE', 'PACKAGE'],\n default='PER_USE',\n help='The pricing plan for this instance.')\n parser.add_argument(\n '--region',\n required=False,\n choices=['us-east1', 'europe-west1'],\n default='us-east1',\n help='The geographical region. 
Can be us-east1 or europe-west1.')\n parser.add_argument(\n '--replication',\n required=False,\n choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],\n default='SYNCHRONOUS',\n help='The type of replication this instance uses.')\n parser.add_argument(\n '--require-ssl',\n required=False,\n action='store_true',\n help='Specified if users connecting over IP must use SSL.')\n parser.add_argument(\n '--tier',\n '-t',\n required=False,\n default='D1',\n help='The tier of service for this instance, for example D0, D1.')\n parser.add_argument(\n '--database-flags',\n required=False,\n nargs='+',\n action=arg_parsers.AssociativeList(),\n help='A space-separated list of database flags to set on the instance. '\n 'Use an equals sign to separate flag name and value. Flags without '\n 'values, like skip_grant_tables, can be written out without a value '\n 'after, e.g., `skip_grant_tables=`. Use on/off for '\n 'booleans. View the Instance Resource API for allowed flags. '\n '(e.g., `--database-flags max_allowed_packet=55555 skip_grant_tables= '\n 'log_output=1`)')\n\n def SetDatabaseFlags(self, settings, database_flags):\n flags_list = []\n\n for (name, value) in database_flags.items():\n if value:\n flags_list.append({'name': name, 'value': value})\n else:\n flags_list.append({'name': name})\n\n settings['databaseFlags'] = flags_list\n\n def Run(self, args):\n \"\"\"Creates a new Cloud SQL instance.\n\n Args:\n args: argparse.Namespace, The arguments that this command was invoked\n with.\n\n Returns:\n A dict object representing the operations resource describing the create\n operation if the create was successful.\n Raises:\n HttpException: A http error response was received while executing api\n request.\n ToolException: An error other than http error occured while executing the\n command.\n \"\"\"\n sql = self.context['sql']\n instance_id = util.GetInstanceIdWithoutProject(args.instance)\n project_id = util.GetProjectId(args.instance)\n # TODO(user): as we deprecate P:I args, simplify the call to .Parse().\n instance_ref = resources.Parse(\n instance_id, collection='sql.instances',\n params={'project': project_id})\n activation_policy = args.activation_policy\n assign_ip = args.assign_ip\n authorized_gae_apps = args.authorized_gae_apps\n authorized_networks = args.authorized_networks\n database_flags = args.database_flags\n database_version = args.database_version\n enable_bin_log = args.enable_bin_log\n follow_gae_app = args.follow_gae_app\n gce_zone = args.gce_zone\n pricing_plan = args.pricing_plan\n region = args.region\n replication = args.replication\n require_ssl = args.require_ssl\n tier = args.tier\n\n backup_start_time = args.backup_start_time\n no_backup = args.no_backup\n settings = {}\n settings['tier'] = tier\n settings['pricingPlan'] = pricing_plan\n settings['replicationType'] = replication\n settings['activationPolicy'] = activation_policy\n settings['authorizedGaeApplications'] = authorized_gae_apps\n location_preference = {}\n if follow_gae_app:\n location_preference['followGaeApplication'] = follow_gae_app\n if gce_zone:\n location_preference['zone'] = gce_zone\n settings['locationPreference'] = location_preference\n ip_configuration = {'enabled': assign_ip,\n 'requireSsl': require_ssl,\n 'authorizedNetworks': authorized_networks}\n settings['ipConfiguration'] = [ip_configuration]\n\n if no_backup:\n if backup_start_time or enable_bin_log:\n raise exceptions.ToolException('Argument --no-backup not allowed with'\n ' --backup-start-time or '\n '--enable_bin_log')\n 
settings['backupConfiguration'] = [{'startTime': '00:00',\n 'enabled': 'False'}]\n\n if backup_start_time:\n backup_config = [{'startTime': backup_start_time,\n 'enabled': 'True',\n 'binaryLogEnabled': enable_bin_log}]\n settings['backupConfiguration'] = backup_config\n\n if database_flags:\n self.SetDatabaseFlags(settings, database_flags)\n\n\n body = {\n 'instance': instance_ref.instance,\n 'project': instance_ref.project,\n 'region': region,\n 'databaseVersion': database_version,\n 'settings': settings,\n }\n request = sql.instances().insert(project=instance_ref.project,\n body=body)\n try:\n result = request.execute()\n operations = self.command.ParentGroup().ParentGroup().operations()\n operation = operations.get(instance=str(instance_ref),\n operation=result['operation'])\n return operation\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)\n\n def Display(self, unused_args, result):\n \"\"\"Display prints information about what just happened to stdout.\n\n Args:\n unused_args: The same as the args in Run.\n result: A dict object representing the operations resource describing the\n create operation if the create was successful.\n \"\"\"\n printer = util.PrettyPrinter(0)\n printer.Print('Result of the create operation:')\n printer.PrintOperation(result)\n","sub_path":"old/google-cloud-sdk/lib/googlecloudsdk/sql/tools/instances/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":9241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"108909288","text":"from random import randrange, shuffle\n\ndef quicksort(list, start, end):\n # this portion of list has been sorted\n if start >= end:\n return\n\n # select random element to be pivot\n pivot_index = randrange(start, end + 1)\n pivot_element = list[pivot_index]\n\n # swap random element with last element in sub-lists\n list[end], list[pivot_index] = list[pivot_index], list[end]\n\n # tracks all elements which should be to left (lesser than) pivot\n less_than_pointer = start\n \n for i in range(start, end):\n # we found an element out of place\n if list[i] < pivot_element:\n # swap element to the right-most portion of lesser elements\n list[i], list[less_than_pointer] = list[less_than_pointer], list[i]\n # tally that we have one more lesser element\n less_than_pointer += 1\n # move pivot element to the right-most portion of lesser elements\n list[end], list[less_than_pointer] = list[less_than_pointer], list[end]\n # recursively sort left and right sub-lists\n quicksort(list, start, less_than_pointer - 1)\n quicksort(list, less_than_pointer + 1, end)\n\n\n \n#Tests \nlist = [1,2,3,4,5,6,7,8,9,10]\nshuffle(list)\nprint(list)\nquicksort(list, 0, len(list) -1)\nprint(list)\n","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"615067423","text":"# -*- coding: utf-8 -*-\nimport utils, matrix_builder\nversion = 'GLO_average'\nsystem_model = 'Allocation, cut-off'\nfolder = utils.version_system_model_path(version, system_model)\nA, B, C, indexes, Z = utils.load_matrices(folder)\nfolder = r'C:\\releases\\other_versions\\GLO_average\\Allocation, cut-off'\nmatrix_builder.calculate_scalings(indexes, A, Z, folder)\nmatrix_builder.calculate_g(folder, indexes, B)\nmatrix_builder.calculate_h(folder, indexes, 
C)\nmatrix_builder.scores_per_indicator(version, system_model)","sub_path":"projects/version4_projects/GLO_average/05_scores.py","file_name":"05_scores.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"43787864","text":"from setuptools import setup, find_packages\nname=\"sfdockertest\"\nversion=\"0.0.4\"\nauthor=\"Adrian Galera\"\nauthor_email=\"agalera@sensefields.com\"\ndescription=\"Wrapper to start/stop docker containers with port forwarding capabilities\"\nlicense=\"Apache\"\nkeywords=\"docker test\"\nurl=\"https://github.com/adriangalera/pythondockertest\"\nclassifiers=[\n\"Development Status :: 1 - Planning\",\n\"Development Status :: 2 - Pre-Alpha\",\n\"Development Status :: 3 - Alpha\",\n\"Development Status :: 4 - Beta\",\n\"Development Status :: 5 - Production/Stable\",\n#\"Development Status :: 6 - Mature\",\n#\"Development Status :: 7 - Inactive\"\n]\ndependencies = [\"docker\"]\ntest_dependencies=[\"redis\", \"pymysql\"]\nsetup(name = name\n ,version = version\n,author = author\n,author_email = author_email\n,description = description\n,license = license\n,keywords = keywords\n,url = url\n,long_description=description\n,classifiers=classifiers\n,install_requires=dependencies\n,extras_require={\"test\":test_dependencies},\npy_modules = ['sfdockertest']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"151141443","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author;鸿\nfrom xpinyin import Pinyin\nimport urllib.request as r\nimport json\nimport os\nimport re\n\n\ndef get_jsondata(city):\n p = Pinyin()\n city_pinyin = p.get_pinyin(city, '')\n url = 'http://api.openweathermap.org/data/2.5/forecast?q={},cn&mode=json&lang=zh_cn&&APPID=6a67ed641c0fda8b69715c43518b6996&units=metric'.format(\n city_pinyin)\n data = r.urlopen(url).read().decode('utf-8')\n data = json.loads(data)\n return data\n\n\nif __name__ == \"__main__\":\n city_history = []\n data = {}\n if os.path.exists('.\\city_temp'):\n lists = os.listdir('.\\city_temp')\n if lists == []:\n print('您没有历史查询记录!')\n else:\n for i in lists:\n city_history.append(i[:2])\n print('您的历史查询城市为:' + str(city_history))\n city = str(input('请输入需要查询的城市:'))\n if not os.path.exists('.\\city_temp'):\n os.mkdir('.\\city_temp')\n if not os.path.exists('.\\city_temp\\\\' + city + '.txt'):\n data = get_jsondata(city)\n with open('.\\city_temp\\\\' + city + '.txt', 'w') as f:\n f.write(str(data))\n else:\n if not os.path.exists('.\\city_temp\\\\' + city + '.txt'):\n data = get_jsondata(city)\n with open('.\\city_temp\\\\' + city + '.txt', 'w') as f:\n f.write(str(data))\n else:\n with open('.\\city_temp\\\\' + city + '.txt', 'r') as f:\n data = f.read()\n data = eval(data)\n i = 0\n j = 0\n temp = []\n while j < 5:\n ls = data['list'][i]\n if ls['dt_txt'].endswith('18:00:00'):\n j += 1\n temp.append(ls['main']['temp'])\n print(ls['dt_txt'][:-3] + city +'的温度为:' + str(ls['main']['temp']))\n print(ls['dt_txt'][:-3] + city +'的天气情况为:' + str(ls['weather'][0]['description']))\n print('-' * 50)\n i += 1\n\n","sub_path":"阶段一/保存json数据.py","file_name":"保存json数据.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"313512464","text":"from Adafruit_IO import Client\n\nimport logging\n\nfrom messagebus import 
MessageHandler\n\nclass AdafruitHandler(MessageHandler):\n\n def __init__(self, api_key, feed, message_field = \"value\"):\n self.message_field = message_field\n self.client = Client(api_key)\n self.feed = feed\n\n def send_value(self, value):\n\n logging.debug(\"Send value %s to feed %s\", value, self.feed)\n self.client.send(self.feed, value)\n\n def handle(self, message):\n\n value = message.data[self.message_field]\n self.send_value(value)\n\n\nif __name__ == \"__main__\":\n\n adafruit = AdafruitHandler(\"\", \"\")\n\n adafruit.send_value(78)\n","sub_path":"messagebus/handlers/adafruit.py","file_name":"adafruit.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"152533977","text":"import uuid\nfrom task import RsyncTask, MergeTask\nfrom executor import tasks\nfrom bottle import route, request, HTTPError\n\n\n@route('/tasks', method='GET')\ndef get_pending_tasks():\n return \"Not implemented yet.\"\n\n\nrsync_tasks = {}\n\n\n@route('/tasks/rsync_request', method='POST')\ndef rsync_request():\n \"\"\" Create a rsync task and wait until the DELETE http method is\n called on the same url to continue.\n \"\"\"\n\n project_name = request.params.project_name\n username = request.params.username\n server_host = request.params.server_host\n\n # Create the rsync task.\n rsync_task = RsyncTask()\n tasks.put(rsync_task)\n\n # Associate a uuid to the rsync task and store it into rsync_tasks\n # dict.\n req_id = str(uuid.uuid4())\n rsync_tasks[req_id] = rsync_task\n\n # Return all the necessary information to the baboon client.\n ret = {'req_id': req_id,\n 'remote_dir': 'root@%s:/tmp/%s/%s/' % \\\n (server_host, project_name, username)\n }\n\n # Return the dict\n return ret\n\n\n@route('/tasks/rsync_request/:req_id', method='DELETE')\ndef rsync_request_finished(req_id):\n \"\"\" Throw an event to say the rsync_task associated to the\n req_id is finished.\n \"\"\"\n\n try:\n rsync_task = rsync_tasks[req_id]\n\n # Throw the event.\n rsync_task.ready.set()\n except:\n raise HTTPError\n\n return 'OK\\n'\n\n\n@route('/tasks', method='POST')\ndef create_task():\n project_name = request.params.project_name\n username = request.params.username\n\n if not project_name or not username:\n # error\n return \"Error\"\n\n new_task = MergeTask(project_name, username)\n tasks.put(new_task)\n\n return 'OK\\n'\n","sub_path":"server/baboonsrv/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95234306","text":"#!/usr/local/bin/python\n\nfrom __future__ import print_function\nimport numpy as np\nimport logging\nimport matplotlib.pyplot as plt\n\n# SR(M) / 200 - k - 1\nzeroes = 0.0\nones = 0.0\ntwos = 0.0\nthrees = 0.0\nfours = 0.0\nfives = 0.0\nsixes = 0.0\nsevens = 0.0\neights = 0.0\n\ndef main():\n\n means_file = \"v_vals.npy\"\n hypervar_file = \"v_hypervariations.data\"\n\n\n #v_vals = np.load(means_file)\n #mean = np.average(v_vals)\n hvars = read_hypervars(hypervar_file)\n training_positions = np.array([hvar[\"positions\"] for hvar in hvars[:int(0.66*len(hvars))]])\n testing_positions = np.array([hvar[\"positions\"] for hvar in hvars[int(0.66*len(hvars)) + 1:]])\n\n\n mean = np.average([np.average(training_position) for training_position in training_positions])\n # For every hvar, calculate the mean square error\n naive_mse = map(lambda positions: calc_mse_naive(positions, mean), 
testing_positions)\n print(\"Naive MSE Total: {}\".format(np.sum(naive_mse)))\n print(\"Naive MSE Mean: {}\".format(np.average(naive_mse)))\n\n\n improving = True\n poly_size = 0\n best_adj = 10000\n xs = []\n ys = []\n while improving or poly_size < 5:\n print(\"Poly size = {}\".format(poly_size))\n poly = get_poly(training_positions, poly_size)\n\n mses = map(lambda positions: calc_mse(positions, poly), testing_positions)\n print(\"Poly MSE Total: {}\".format(np.sum(mses)))\n print(\"Poly MSE Mean: {}\".format(np.average(mses)))\n\n mse_adj = adj_mse(mses, testing_positions, poly_size)\n print(\"SRM/(Len - k - 1) Total: {}\".format(np.sum(mse_adj)))\n print(\"SRM/(Len - k - 1) Mean: {}\".format(np.average(mse_adj)))\n\n\n xs.append(poly_size)\n ys.append(np.average(mse_adj))\n\n poly_size += 1\n if np.average(mse_adj) >= best_adj:\n improving = False\n else:\n improving = True\n best_adj = np.average(mse_adj)\n\n\n plt.plot(xs, ys)\n plt.show()\n\ndef get_poly(positions, poly_size):\n \"\"\" Positions is [[0, 1, 0 ...] ...]\n poly_size is int\"\"\"\n\n #print(positions)\n polys = []\n for position_ar in positions:\n polys.append(np.polynomial.polynomial.polyfit(\n [x for x in range(len(position_ar))], position_ar, poly_size))\n\n totals = []\n for poly in polys:\n for i in range(len(poly)):\n try:\n totals[i] += poly[i]\n except IndexError:\n totals.append(poly[i])\n # will need an except here\n return [float(tot) / len(positions) for tot in totals]\n #return np.polynomial.polynomial.polyfit(\n # [[x for x in range(len(positions[i]))] for i in range(len(positions))], positions, poly_size)\n\ndef adj_mse(mses, positions, k):\n adj = []\n for (mse, position) in zip(mses, positions):\n if len(position) > k + 1:\n adj.append(float(mse) / (len(position) - k - 1))\n return adj\n\ndef get_running_avg(hvars, start, length):\n n = 0\n total = 0.0\n for j in range(int(start - ((length-1)/2)), int(start + ((length-1)/2)) + 1):\n if j > 0 and j < len(hvars):\n n += 1\n total += hvars[j]\n\n return total / n\n\ndef calc_mse_naive(positions, mean):\n tot = float(0.0)\n for i in range(len(positions)):\n try:\n tot += (positions[i] - mean)**2\n #tot += (float(get_running_avg(positions, i, length)) - mean)**2\n except ZeroDivisionError:\n pass\n \n return tot\n\ndef calc_mse(positions, poly):\n tot = float(0.0)\n \n #positions = hvar[\"positions\"]\n\n for i in range(len(positions)):\n try:\n prediction = 0.0\n for j in range(len(poly)):\n val = poly[j]\n for k in range(j):\n val *= i\n prediction += val\n tot += (prediction - positions[i])**2\n except ZeroDivisionError:\n pass\n\n return tot\n\ndef interpolate(vals, final_length):\n final_vals = []\n #print(\"Interpolating from length of {0:d} to {1:d}\".format(len(vals), final_length))\n if len(vals) > final_length:\n frequency_to_remove = len(vals)/(final_length - len(vals))\n for i in range(len(vals)):\n if not i % frequency_to_remove == 0:\n final_vals.append(vals[i])\n\n elif len(vals) < final_length:\n frequency_to_repeat = len(vals)/(final_length - len(vals))\n for i in range(len(vals)):\n final_vals.append(vals[i])\n if i % frequency_to_repeat == 0:\n final_vals.append(vals[i])\n else:\n final_vals = vals\n return final_vals\n\ndef read_hypervars(fname):\n \"\"\" returns a list of hypervariations.\n Each hypervariation is a dict of {\"header\", \"positions\"}\n Header is a string.\n Positions is a list of floats. 
\"\"\"\n\n logging.info(\"Reading hypervariations from {0:s}\".format(fname))\n hvars = []\n with open(fname, 'r') as h_file:\n i = 1\n for line in h_file:\n if '>' in line:\n my_hvar = {\"header\" : line.strip()}\n \n else:\n if my_hvar is not None:\n positions = []\n line_pos = line.split(' ')\n for pos in line_pos:\n positions.append(float(pos))\n \n my_hvar[\"positions\"] = positions\n hvars.append(my_hvar)\n print(\"Read hvar line #{0:d}/26307 = {1:f}%\".format(i, i/float(263.07)), end=\"\\r\")\n\n i += 1\n\n logging.info(\"Read {0:d} hypervariations\".format(len(hvars)))\n print(\"\\nRead {0:d} hypervariations\".format(len(hvars)))\n\n return hvars\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/hypermutation/b_analysis/6-mse/calc_mse.py","file_name":"calc_mse.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"5286197","text":"from eve import Eve\nfrom computer import Computer\nimport platform\nimport psutil\nimport json\nfrom flask import Response\n \napp = Eve()\n\n@app.route('/processor', methods = ['GET'])\ndef processor():\n \n computerDetails = Computer(platform.processor(),\n psutil.virtual_memory(), \n psutil.disk_usage('/'), \n platform.version(), \n platform.system(), \n platform.node(), \n platform.machine(), \n psutil.cpu_percent())\n \n sdata = json.dumps(computerDetails.__dict__)\n \n response = Response()\n response.headers[\"status\"] = 200\n response.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n response.data= sdata\n \n return response\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"rest/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299184481","text":"\n\nfrom xai.brain.wordbase.verbs._venerate import _VENERATE\n\n#calss header\nclass _VENERATING(_VENERATE, ):\n\tdef __init__(self,): \n\t\t_VENERATE.__init__(self)\n\t\tself.name = \"VENERATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"venerate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_venerating.py","file_name":"_venerating.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"332238462","text":"# Differential analysis between TF activity\n\nimport os\nimport pandas\nimport numpy as np\nfrom scipy.stats.stats import pearsonr\nimport matplotlib.pyplot as plt\nfrom Functions_misc import list_search_pattern, angle\n\nroot_dir = \"/home/marie/Documents/\"\n\n################################################################################\n# Functions\n\n\n################################################################################\n# Main\n################################################################################\n\n# STEP 1\n################################################################################\n\n# input_files = os.listdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input/viper_supervised\"\n# %root_dir)\n# viper_files = list_search_pattern(\"^M.{2}_.{4,7}_supervised.tsv\",input_files)\n# viper_files = list_search_pattern(\"^M01_BT20_supervised.tsv\",input_files)\n#\n# print(viper_files)\n#\n# for f in viper_files:\n# file_ID = f.split(\"_\")[0]\n# file_CL = f.split(\"_\")[1]\n#\n# os.chdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input/viper_supervised\" %root_dir)\n# viper = pandas.read_table(f, index_col=0)\n#\n# conditions = 
list(set([\"_\".join(x.split(\"_\")[:-1]) for x in viper.columns.values]))\n# replicates_metric = pandas.DataFrame(np.nan, index = [\"correlation\",\"angle\"],\n# columns = conditions)\n# # GET THE QUALITY METRIC BETWEEN REPLICATES\n# for c in conditions:\n# col_names = list_search_pattern(\"^%s_.{3}$\" %c, viper.columns.values)\n#\n# if len(col_names) > 1:\n#\n# replicates_metric.loc[\"correlation\",c] = pearsonr(viper.loc[:,col_names[0]].tolist(),\n# viper.loc[:,col_names[1]].tolist())[0]\n#\n# replicates_metric.loc[\"angle\",c] = angle(viper.loc[:,col_names[0]].tolist(),\n# viper.loc[:,col_names[1]].tolist())\n#\n#\n# anti_correlated = replicates_metric.loc[:, replicates_metric.loc[\"correlation\", :]<-0.3]\n#\n# # GET THE EXPRESSION FILE\n# input_files = os.listdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input\"\n# %root_dir)\n# expr_files = list_search_pattern(\"%s_filtered.tsv\"%file_ID,input_files)\n# design_files = list_search_pattern(\"%s_design.tsv\"%file_ID,input_files)\n# os.chdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input\" %root_dir)\n# expr = pandas.read_table(expr_files[0], index_col=0)\n# design = pandas.read_table(design_files[0], index_col=0)\n#\n# # CREATE EXPRESSION FILE WITH NOISE 1 PER CONDITION REPLICATE\n# for c in anti_correlated.columns.values.tolist():\n# print(c)\n# wells = design.loc[design.loc[:,\"Conditions\"] == \"%s_BT20\"%c, :].index.tolist()\n# control_wells = design.loc[design.loc[:,\"Conditions\"] == \"-_0.000_BT20\", :].index.tolist()\n# for rep in wells:\n# design_select = design.loc[[rep]+control_wells,:]\n# expr_select = expr.loc[:,[rep]+control_wells]\n#\n# for sample in range(0,100):\n# design_select.loc[\"%dN%s\"%(sample,rep),:]=design_select.loc[rep,\"Conditions\"]\n# design_select.loc[\"%dN\"%(sample),:]=\"noise_%s\"%sample\n# original = expr_select.loc[:,rep].tolist()\n# # print(np.percentile(original,10))\n# noise = np.random.uniform(-np.percentile(original,10), np.percentile(original,10),\n# len(original))\n# signal = original + noise\n# expr_select[\"%dN%s\"%(sample,rep)] = pandas.Series(signal, index=expr_select.index)\n# expr_select[\"%dN\"%(sample)] = pandas.Series(noise, index=expr_select.index)\n#\n# os.chdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input/sensibility\" %root_dir)\n# expr_select.to_csv(\"%s_%s_expr.tsv\" %(c,rep), sep=\"\\t\")\n# design_select.to_csv(\"%s_%s_design.tsv\" %(c,rep), sep=\"\\t\")\n\n\n# STEP 2\n################################################################################\ninput_files = os.listdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input/sensibility\"\n %root_dir)\nviper_files = list_search_pattern(\"viper.tsv\", input_files)\nprint(viper_files)\n\nfor f in viper_files:\n # compute correlation\n os.chdir(\"%sPROCESSED/Step_5_mRNA/VIPER_input/sensibility\" %root_dir)\n viper = pandas.read_table(f, index_col=0)\n expr_files = list_search_pattern(\"%s_expr.tsv\" %\"_\".join(f.split(\"_\")[:-1]), input_files)\n expr = pandas.read_table(expr_files[0], index_col=0)\n\n correlation = pandas.DataFrame(0, index = viper.columns.values[:-1],\n columns = [\"correlation\",\"angle\"])\n for n in correlation.index.tolist():\n correlation.loc[n,\"correlation\"] = pearsonr(viper.iloc[:,0].tolist(), viper.loc[:,n].tolist())[0]\n correlation.loc[n,\"angle\"] = angle(viper.iloc[:,0].tolist(), viper.loc[:,n].tolist())\n\n correlation = correlation.sort_values(by=\"correlation\", axis=0, ascending=False)\n\n # anti = correlation.loc[correlation.loc[:,\"correlation\"] < -0.25,:]\n wells = [x[:-3] for x in correlation.index.tolist()[1:]]\n\n\n 
print(correlation)\n\n    gene_correlation = pandas.DataFrame(0, index = expr.index,\n                                   columns = [\"gene_correlation\",\"gene_angle\"])\n\n\n    expr_select = expr.loc[:,wells]\n\n    for gene in expr.index.tolist():\n        gene_correlation.loc[gene,\"gene_correlation\"] = pearsonr(correlation.iloc[1:,0].tolist(),\n                                            expr_select.loc[gene,:].tolist())[0]\n        gene_correlation.loc[gene,\"gene_angle\"] = angle(correlation.iloc[1:,1].tolist(),\n                                            expr_select.loc[gene,:].tolist())\n\n    gene_correlation = gene_correlation.sort_values(by=\"gene_correlation\",axis=0,ascending=False)\n    plt.hist(gene_correlation.loc[:,\"gene_correlation\"].tolist())\n    # plt.title(\"Correlation between replicates %s\" %(file_ID))\n    plt.ylabel(\"Nb genes (Total = %d)\" %len(expr.index.tolist()))\n    plt.xlabel(\"correlation gene noise and viper output\")\n    plt.show()\n","sub_path":"SRC/Python/Sensibility_viper.py","file_name":"Sensibility_viper.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"578961281","text":"from flask import Flask, jsonify, request, render_template\nimport requests, json, os\n\nimport logging, sys\nlogging.basicConfig(stream=sys.stderr)\n\ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n\napp = Flask(__name__, template_folder=tmpl_dir)\n\n@app.route('/process', methods=['POST'])\ndef why():\n    print >> sys.stderr, \"Here!\"\n    #data = jsonify(request.get_json(force=True))\n    #url = \"http://api.openweathermap.org/data/2.5/weather?q=\" + data[ + \",nc,us\"\n    #reads = requests.get(url)\n    #data = json.loads(reads.content)\n\n    #weather = data['weather'][0]['main']\n    #print(data)\n    #return data\n    return jsonify(request.get_json(force=True))\n\n@app.route('/')\ndef home(): # pragma: no cover\n# content = get_file('index.html')\n    return render_template('index.html', title='index')\n\n\nif __name__ == '__main__':\n    app.run(host='localhost',port=80, debug=True)\n","sub_path":"web-main/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"423945225","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\ndx, dy = [1, 0, -1, 0], [0, 1, 0, -1]\ndef search(i, j, val):\n    # for q in range(N):\n    #     print(visit[q])\n    # print()\n    visit[i][j] = val\n    for d in range(4):\n        ni, nj = i+dx[d], j+dy[d]\n        if -1 < ni < N and -1 < nj < N:\n            nval = val+arr[ni][nj]\n            if -1 < visit[N-1][N-1] < nval:\n                continue\n            if visit[ni][nj] < 0:\n                visit[ni][nj] = nval\n                search(ni, nj, nval)\n            else:\n                if nval < visit[ni][nj]:\n                    visit[ni][nj] = nval\n                    search(ni, nj, nval)\n\nfor tc in range(1, int(input())+1):\n    N = int(input())\n    arr = [list(map(int, input())) for _ in range(N)]\n    visit = [[-1]*N for _ in range(N)]\n    visit[0][0] = 0\n    search(0, 0, 0)\n    print(f'#{tc} {visit[N-1][N-1]}')","sub_path":"swea/D4/1249.py","file_name":"1249.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"66402701","text":"class Solution:\n    def minWindow(self, s: 'str', t: 'str') -> 'str':\n        # use a hash-table to store the number of appearances needed in target list\n        if not s:\n            return \"\"\n        from collections import Counter\n        # target count\n        tcnt = Counter(t)\n        targetUniqueL = len(tcnt)\n        matched = 0\n        # mapping count\n        mcnt, n, j = Counter(), len(s), 0\n        minL, minWindow = n + 1, \"\"\n        # move the left pointer\n        for i in range(n):\n            # move in\n            while j < n and matched < 
targetUniqueL:\n if s[j] in tcnt:\n mcnt[s[j]] += 1\n if mcnt[s[j]] == tcnt[s[j]]:\n matched += 1\n j += 1\n \n # record/update min window\n if matched == targetUniqueL and j - i < minL:\n minL = j - i\n minWindow = s[i:j]\n # move out\n if s[i] in tcnt:\n if mcnt[s[i]] == tcnt[s[i]]:\n matched -= 1\n mcnt[s[i]] -= 1 \n return minWindow","sub_path":"Python/Data Structure/Sliding Window/Minimum Window Substring.py","file_name":"Minimum Window Substring.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"39325674","text":"#!/usr/bin/env python\nimport numpy as np\nfrom mnist_numpy import read_mnist\nfrom nnet_toolkit import nnet\nfrom nnet_toolkit import select_funcs as sf\nfrom autoconvert import autoconvert\nimport sys\nimport time\n\n#h5py used for saving results to a file\nimport h5py\n\n#Get the parameters file from the command line\n#use mnist_train__forget_params.py by default (no argument given)\nif(len(sys.argv) > 1):\n params_file = sys.argv[1]\nelse:\n params_file = 'mnist_train_forget_params.py'\n \np = {}\nexecfile(params_file,p)\n\n#grab extra parameters from command line\nfor i in range(2,len(sys.argv)):\n (k,v) = sys.argv[i].split('=')\n v = autoconvert(v)\n p[k] = v\n print(str(k) + \":\" + str(v))\n \ndef load_data(digits,dataset,p):\n images, labels = read_mnist(digits,dataset,path=p['data_dir']);\n labels = labels.transpose()[0] #put labels in an array\n images = np.float64(images)\n #(normalize between 0-1)\n images /= 255.0 \n #normalize between -1 and 1 for hyperbolic tangent\n images = images - 0.5;\n images = images*2.0; \n \n train_size = labels.shape[0]\n sample_data = images.reshape(train_size,28*28)\n\n #build classification data in the form of neuron outputs\n class_data = np.ones((labels.shape[0],10))*p['incorrect_target']\n for i in range(labels.shape[0]):\n class_data[i,labels[i]] = p['correct_target'];\n\n if(p['use_float32']):\n sample_data = np.asarray(sample_data,np.float32)\n class_data = np.asarray(class_data,np.float32)\n \n return (sample_data,class_data)\n\n(sample_data,class_data) = load_data(range(10),\"training\",p)\ntrain_size = sample_data.shape[0]\n\n(test_data,test_class) = load_data(range(10),\"testing\",p)\ntest_size = test_data.shape[0]\n\nnum_hidden = p['num_hidden']\n\ntraining_epochs = p['training_epochs']\n\nminibatch_size = p['minibatch_size']\n\nlayers = [];\nlayers.append(nnet.layer(28*28))\nlayers.append(nnet.layer(p['num_hidden'],p['activation_function'],select_func=p['select_func'],\n initialization_scheme=p['initialization_scheme'],\n initialization_constant=p['initialization_constant'],\n dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],\n sparse_target=p['sparse_target'],use_float32=p['use_float32'],\n momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate'],\n rms_prop_rate=p['rms_prop_rate']))\n\n#Add 2nd and 3rd hidden layers if there are parameters indicating that we should\nif(p.has_key('num_hidden2')):\n layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],select_func=p['select_func2'],\n initialization_scheme=p['initialization_scheme2'],\n initialization_constant=p['initialization_constant2'],\n dropout=p['dropout2'],sparse_penalty=p['sparse_penalty2'],\n sparse_target=p['sparse_target2'],use_float32=p['use_float32'],\n momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate'],\n rms_prop_rate=p['rms_prop_rate']))\n\nif(p.has_key('num_hidden3')):\n 
layers.append(nnet.layer(p['num_hidden3'],p['activation_function3'],select_func=p['select_func3'],\n initialization_scheme=p['initialization_scheme3'],\n initialization_constant=p['initialization_constant3'],\n dropout=p['dropout3'],sparse_penalty=p['sparse_penalty3'],\n sparse_target=p['sparse_target3'],use_float32=p['use_float32'],\n momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate'],\n rms_prop_rate=p['rms_prop_rate']))\n\nlayers.append(nnet.layer(10,p['activation_function_final'],use_float32=p['use_float32'],step_size=p['learning_rate_final']))\n\n\nlearning_rate = p['learning_rate']\n\nnp.random.seed(p['random_seed']);\n\n#init net\nnet = nnet.net(layers,learning_rate)\n\nsave_interval = p['save_interval']\n\nsave_time = time.time()\n\n#these are the variables to save\ntrain_mse_list = [];\ntrain_missed_list = [];\ntrain_missed_percent_list = [];\ntest_mse_list = [];\ntest_missed_list = [];\ntest_missed_percent_list = [];\n\nfor i in range(training_epochs):\n minibatch_count = int(train_size/minibatch_size)\n \n #shuffle data\n rng_state = np.random.get_state();\n np.random.shuffle(sample_data)\n np.random.set_state(rng_state)\n np.random.shuffle(class_data)\n \n #count number of correct\n train_missed = 0;\n train_mse = 0;\n for j in range(minibatch_count+1):\n #grab a minibatch\n net.input = np.transpose(sample_data[j*minibatch_size:(j+1)*minibatch_size])\n classification = np.transpose(class_data[j*minibatch_size:(j+1)*minibatch_size])\n net.feed_forward()\n net.error = net.output - classification\n guess = np.argmax(net.output,0)\n c = np.argmax(classification,0)\n train_missed = train_missed + np.sum(c != guess)\n train_mse = train_mse + np.sum(net.error**2)\n net.back_propagate()\n net.update_weights()\n train_missed_percent = float(train_missed)/float(train_size)\n \n #feed test set through to get test rates\n net.train=False\n net.input = np.transpose(test_data)\n net.feed_forward()\n test_guess = np.argmax(net.output,0)\n c = np.argmax(np.transpose(test_class),0)\n test_missed = np.sum(c != test_guess)\n test_mse = np.sum(net.error**2)\n test_missed_percent = float(test_missed)/float(test_size)\n net.train=True\n \n #log everything for saving\n train_mse_list.append(train_mse)\n train_missed_list.append(train_missed)\n train_missed_percent_list.append(train_missed_percent)\n test_mse_list.append(test_mse)\n test_missed_list.append(test_missed)\n test_missed_percent_list.append(test_missed_percent)\n \n print('epoch ' + str(i) + \": test-missed: \" + str(test_missed) + \" MSE: \" + str(test_mse) + \" percent missed: \" + str(test_missed_percent) + \" train percent missed: \" + str(train_missed_percent));\n #f_out.write(str(train_mse) + \",\" + str(train_missed_percent) + \",\" + str(test_missed_percent) + \"\\n\")\n if(time.time() - save_time > save_interval or i == training_epochs-1):\n print('saving results...')\n f_handle = h5py.File(p['results_dir'] + p['simname'] + p['version'] + '.h5py','w')\n f_handle['train_mse_list'] = np.array(train_mse_list);\n f_handle['train_missed_list'] = np.array(train_missed_list);\n f_handle['train_missed_percent_list'] = np.array(train_missed_percent_list);\n f_handle['test_mse_list'] = np.array(test_mse_list);\n f_handle['test_missed_list'] = np.array(test_missed_list);\n f_handle['test_missed_percent_list'] = np.array(test_missed_percent_list);\n \n #iterate through all parameters and save them in the parameters group\n p_group = f_handle.create_group('parameters');\n for param in p.iteritems():\n #only save the ones 
that have a data type that is supported\n if(type(param[1]) in (int,float,str)):\n p_group[param[0]] = param[1];\n f_handle.close();\n save_time = time.time();\n \n","sub_path":"mnist_train.py","file_name":"mnist_train.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"462440855","text":"from __future__ import absolute_import, division\nimport numpy as np\nfrom astropy.io import fits\nfrom scipy.signal import fftconvolve as conv\nfrom os import getcwd, mkdir, extsep\nfrom os.path import join, basename, dirname, isdir\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.widgets import Slider\n\n__author__= \"Juhyung Kang\"\n__email__ = \"jhkang@astro.snu.ac.kr\"\n\ndef runDAVE(data0, output=False, overwrite=False, fwhm=10, adv=1, source=0,\n noise=1, winFunc='Gaussian', outSig=False):\n \"\"\"\n Differential Affine Velocity Estimator for all spatial points.\n This is the python function of dave_multi.pro IDL code written by J. Chae (2009).\n\n Parameters\n ----------\n data0: `~numpy.ndarray` or fits file.\n Three-dimensional input array with shape (nt, ny, nx) or\n fits file with same dimension and shape.\n output: `str`, optional\n The name of the output fits file name.\n If False, it makes OFE directory and write the *_dave.fits file.\n Default is `False`.\n fwhm: `int`, optional\n FWHM of the window function (should be even positive integer)\n\n Returns\n -------\n fits file:\n \"\"\"\n if type(data0) == str:\n data = fits.getdata(data0)\n dirn = join(dirname(data0), 'ofe')\n if not isdir(dirn):\n mkdir(dirn)\n fname = f'{basename(data0).split(extsep)[0]}_dave.fits'\n else:\n data = data0\n dirn = getcwd()\n fname = 'dave.fits'\n if data.ndim != 3:\n raise ValueError('data must be 3-D array.')\n if not output:\n output = join(dirn, fname)\n\n dnt, dny, dnx = data.shape\n psw = adv\n qsw = source\n\n # Construncting window function\n winFunc = winFunc.capitalize()\n h = int(fwhm/2)\n if winFunc == 'Square':\n mf = 1\n else:\n mf = 2\n nx = 2*h*mf+1\n ny = 2*h*mf+1\n x = -(np.arange(nx)-nx//2)\n y = -(np.arange(ny)-ny//2)\n\n if winFunc == 'Square':\n w = np.ones((ny, nx))\n elif winFunc == 'Gaussian':\n w = np.exp(-np.log(2)*((x/h)**2+(y[:,None]/h)**2))\n elif winFunc == 'Hanning':\n w = (1+np.cos(np.pi*x/h/2))*(1+np.cos(np.pi*y/h/2))/4\n else:\n raise ValueError(\"winFunc must be one of ('Square', 'Gaussian', \"\n \"'Hanning')\")\n w /= noise**2\n\n # Construncting coefficent arrays\n im = data\n imT = (np.roll(im, -1, axis=0) - np.roll(im, 1, axis=0))/2\n imY, imX = np.gradient(im, axis=(1, 2))\n\n npar = 6+qsw\n A = np.empty((npar, npar, dnt, dny, dnx))\n A[0,0] = conv(imX*imX, w[None, :, :],\n 'same', axes=(1, 2)) # U0, U0\n A[1,0] = A[0,1] = conv(imY*imX, w[None, :, :],\n 'same', axes=(1, 2)) # V0, U0\n A[1,1] = conv(imY*imY, w[None, :, :],\n 'same', axes=(1, 2)) # V0, V0\n A[2,0] = A[0,2] = conv(imX*imX, x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw*conv(imX*im, w[None, :, :],\n 'same',axes=(1, 2)) # Ux, U0\n A[2,1] = A[1,2] = conv(imX*imY, x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw*conv(imY*im, w[None, :, :],\n 'same', axes=(1, 2)) # Ux, V0\n A[2,2] = conv(imX*imX, x*x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + 2*psw*conv(imX*im, x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw**2*conv(im*im, w[None, :, :],\n 'same', axes=(1, 2)) # Ux, Ux\n A[3,0] = A[0,3] = conv(imY*imX, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) \\\n 
+ psw*conv(imX*im, w[None, :, :],\n 'same', axes=(1, 2)) # Vy, U0\n A[3,1] = A[1,3] = conv(imY*imY, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw*conv(imY*im, w[None, :, :],\n 'same', axes=(1, 2)) # Vy, V0\n A[3,2] = A[2,3] = conv(imY*imX, y[None,:,None]*x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw*conv(imY*im, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) + \\\n + psw*conv(imX*im, x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw**2*conv(im*im, w[None, :, :],\n 'same', axes=(1, 2)) # Vy, Ux\n A[3,3] = conv(imY*imY, y[None,:,None]*y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + 2*psw*conv(imY*im, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw**2*conv(im*im, w[None, :, :],\n 'same', axes=(1, 2)) # Vy, Vy\n A[4,0] = A[0,4] = conv(imX*imX, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) # Uy, U0\n A[4,1] = A[1,4] = conv(imX*imY, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) # Uy, V0\n A[4,2] = A[2,4] = conv(imX*imX, y[None,:,None]*x*w[None, :, :],\n 'same', axes=(1, 2)) \\\n + psw*conv(imX*im, y[None,:,None]*w[None, :, :],\n 'same', axes=(1, 2)) # Uy, Ux\n A[4,3] = A[3,4] = conv(imX*imY,\n y[None,:,None]*y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) \\\n + psw*conv(imX*im, y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) # Uy, Vy\n A[4,4] = conv(imX*imX, y[None,:,None]*y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) # Uy, Uy\n A[5,0] = A[0,5] = conv(imY*imX, x*w[None,:,:],\n 'same', axes=(1, 2)) # Vx, U0\n A[5,1] = A[1,5] = conv(imY*imY, x*w[None,:,:],\n 'same', axes=(1, 2)) # Vx, V0\n A[5,2] = A[2,5] = conv(imY*imX, x*x*w[None,:,:],\n 'same', axes=(1, 2)) \\\n + psw*conv(im*imY, x*w[None,:,:],\n 'same', axes=(1, 2)) # Vx, Ux\n A[5,3] = A[3,5] = conv(imY*imY, x*y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) \\\n + psw*conv(im*imY, x*w[None,:,:],\n 'same', axes=(1, 2)) # Vx, Vy\n A[5,4] = A[4,5] = conv(imY*imX, x*y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) # Vx, Uy\n A[5,5] = conv(imY*imY, x*x*w[None,:,:],\n 'same', axes=(1, 2)) #Vx, Vx\n\n if qsw:\n A[6,0] = A[0,6] = -qsw*conv(im*imX, w[None,:,:],\n 'same', axes=(1, 2)) # mu, U0\n A[6,1] = A[1,6] = -qsw*conv(im*imY, w[None,:,:],\n 'same', axes=(1, 2)) # mu, V0\n A[6,2] = A[2,6] = -qsw*conv(im*imX, x*w[None,:,:],\n 'same', axes=(1, 2)) \\\n - qsw*psw*conv(im*im, w[None,:,:],\n 'same', axes=(1, 2)) # mu, Ux\n A[6,3] = A[3,6] = -qsw*conv(im*imY, y[None,:,None]*w[None,:,:],\n 'same', axes=(1, 2)) \\\n - qsw*psw*conv(im*im, w[None,:,:],\n 'same', axes=(1, 2)) # mu, Vy\n A[6,4] = A[4,6] = -qsw*conv(im*imX, y[None,:,None]*w[None,:,:],\n 'same', axes=(1,2)) # mu, Uy\n A[6,5] = A[5,6] = -qsw*conv(im*imY, x*w[None,:,:],\n 'same', axes=(1, 2)) # mu, Vx\n A[6,6] = -qsw**2*conv(im*im, w[None,:,:],\n 'same', axes=(1,2)) # mu, mu\n\n B = np.empty((npar, dnt, dny, dnx))\n B[0] = conv(imT*imX, -w[None,:,:], 'same', axes=(1,2))\n B[1] = conv(imT*imY, -w[None,:,:], 'same', axes=(1,2))\n B[2] = conv(imT*imX, -x*w[None,:,:], 'same', axes=(1,2)) \\\n + psw*conv(imT*im, -w[None,:,:], 'same', axes=(1,2))\n B[3] = conv(imT*imY, -y[None,:,None]*w[None,:,:], 'same', axes=(1,2)) \\\n + psw*conv(imT*im, -w[None,:,:], 'same', axes=(1,2))\n B[4] = conv(imT*imX, -y[None,:,None]*w[None,:,:], 'same', axes=(1,2))\n B[5] = conv(imT*imY, -x*w[None,:,:], 'same', axes=(1,2))\n if qsw:\n B[6] = qsw*conv(imT*(-im), -w[None,:,:], 'same', axes=(1,2))\n\n dave = np.linalg.solve(A.T, B.T).T\n\n\n if not outSig:\n hdu = fits.PrimaryHDU(dave)\n hdu.header['type'] = 'DAVE'\n hdu.writeto(output, 
overwrite=overwrite)\n# else: #TODO sigma and chisq calculation\n    return output\n\n\nclass readOFE:\n    def __init__(self, data0, ofeFile, scale=None, dt=None, gMethod=True):\n        \"\"\"\n        Read the Optical Flow Estimated File.\n\n        Parameters\n        ----------\n        scale: float\n            Spatial pixel scale (arcsec).\n        \"\"\"\n\n        if type(data0) == str:\n            data = fits.getdata(data0)\n            header = fits.getheader(data0)\n            nx = header['naxis1']\n            ny = header['naxis2']\n            cx = header['crval1']\n            cy = header['crval2']\n            cxp = header['crpix1']\n            cyp = header['crpix2']\n            dx = header['cdelt1']\n            dy = header['cdelt2']\n            dt = header['cdelt3']\n            l = -(cxp+0.5)*dx+cx\n            b = -(cyp+0.5)*dy+cy\n            r = (nx-(cxp+0.5))*dx+cx\n            t = (ny-(cyp+0.5))*dy+cy\n\n            scale = dx\n        else:\n            data = data0.copy()\n            ny, nx = data.shape[1:]  # grid size from the data array (needed for the extent below)\n            l = -0.5\n            b = -0.5\n            r = nx-0.5\n            t = ny-0.5\n            dx = 1\n            dy = 1\n        self.extent = [l, r, b, t]\n        self.data = data\n        self.nt, self.ny, self.nx = self.data.shape\n\n        self._xarr = np.linspace(self.extent[0]+dx*0.5,\n                                 self.extent[1]-dx*0.5,\n                                 self.nx)\n        self._yarr = np.linspace(self.extent[2]+dy*0.5,\n                                 self.extent[3]-dy*0.5,\n                                 self.ny)\n\n        if self.data.ndim != 3:\n            raise ValueError(\"data must have 3 dimensions\")\n        if not scale or not dt:\n            raise KeyError(\"If data is an `~numpy.ndarray`, \"\n                           \"'scale' and 'dt' must be given.\")\n        unit = scale*725/dt # km/s\n        self.ofe = fits.getdata(ofeFile)\n        self.oheader = fits.getheader(ofeFile)\n        self.otype = self.oheader['type']\n        self.U0 = self.ofe[0]*unit\n        self.V0 = self.ofe[1]*unit\n        self.Ux = self.ofe[2]*unit\n        self.Vy = self.ofe[3]*unit\n        self.Uy = self.ofe[4]*unit\n        self.Vx = self.ofe[5]*unit\n        self.C = np.arctan2(self.V0, self.U0)\n\n        if gMethod:\n            Uy, Ux = np.gradient(self.U0, axis=(1,2))\n            Vy, Vx = np.gradient(self.V0, axis=(1,2))\n        else:\n            Ux = self.Ux\n            Uy = self.Uy\n            Vx = self.Vx\n            Vy = self.Vy\n\n        self.div = Ux + Vy\n        self.curl = Vx - Uy\n\n    def imshow(self, t=1, div=True, curl=True, **kwargs):\n        \"\"\"\n        Display the data with a velocity vector field.\n\n        Parameters\n        ----------\n        t: int\n            Default is 1.\n        div: bool\n            If True, display divergence map.\n        curl: bool\n            If True, display curl map.\n        \"\"\"\n\n        try:\n            plt.rcParams['keymap.back'].remove('left')\n            plt.rcParams['keymap.forward'].remove('right')\n        except:\n            pass\n\n        self.t = t\n        self._onDiv = div\n        self._onCurl = curl\n        kwargs['extent'] = kwargs.pop('extent', self.extent)\n        kwargs['origin'] = kwargs.pop('origin', 'lower')\n        width = kwargs.pop('width', 0.004)\n        scale = kwargs.pop('scale', 200)\n\n        if div or curl:\n            nw = div + curl + 1\n        else:\n            nw = 1\n        self.nw = nw\n        self.fig = plt.figure(self.otype, figsize=(6*nw,6), clear=True)\n        gs = GridSpec(11, nw, wspace=0, hspace=0)\n\n        self.axVec = self.fig.add_subplot(gs[:10, 0])\n        self.axSlider = self.fig.add_subplot(gs[10, :])\n        self.im = self.axVec.imshow(self.data[self.t], **kwargs)\n        self.vec = self.axVec.quiver(self._xarr, self._yarr,\n                                     self.U0[self.t],\n                                     self.V0[self.t],\n                                     self.C[self.t],\n                                     cmap=plt.cm.hsv,\n                                     width=width,\n                                     scale=scale)\n\n        self.axVec.set_title(r'$\\mathbf{v}$')\n        self.axVec.set_xlabel('X')\n        self.axVec.set_ylabel('Y')\n        self.axVec.set_title(r'$\\mathbf{v}$ field ' f'({self.otype})')\n        if div:\n            self.axDiv = self.fig.add_subplot(gs[:10, 1], sharex=self.axVec,\n                                              sharey=self.axVec)\n            self.imDiv = self.axDiv.imshow(self.div[self.t],\n                                           plt.cm.seismic,\n                                           **kwargs)\n            self.axDiv.tick_params(labelbottom=False, labelleft=False)\n            self.axDiv.set_title(r'$\\mathbf{\\nabla} \\cdot$'\n                                 r'$\\mathbf{v}$')\n\n        if curl:\n            self.axCurl = self.fig.add_subplot(gs[:10, -1], sharex=self.axVec,\n                                               
sharey=self.axVec)\n self.imCurl = self.axCurl.imshow(self.curl[self.t],\n plt.cm.PiYG,\n **kwargs)\n self.axCurl.tick_params(labelbottom=False, labelleft=False)\n self.axCurl.set_title(r'$\\mathbf{\\nabla} \\times \\mathbf{V}$')\n\n self.sT = Slider(self.axSlider, 'Time(pix)', 0, self.nt-1,\n valinit=self.t, valstep=1, valfmt=\"%i\")\n self.sT.on_changed(self._chTime)\n self.fig.tight_layout()\n self.fig.canvas.mpl_connect('key_press_event', self._onKey)\n\n def _chTime(self, val):\n self.t = int(self.sT.val)\n self.im.set_data(self.data[self.t])\n self.vec.set_UVC(self.U0[self.t], self.V0[self.t], self.C[self.t])\n if self._onDiv:\n self.imDiv.set_data(self.div[self.t])\n if self._onCurl:\n self.imCurl.set_data(self.curl[self.t])\n\n def _onKey(self, event):\n if event.key == 'left':\n if self.t > 0:\n self.t -= 1\n else:\n self.t = self.nt-1\n self.sT.set_val(self.t)\n elif event.key == 'right':\n if self.t < self.nt-1:\n self.t += 1\n else:\n self.t = 0\n self.sT.set_val(self.t)\n\n\n\n\n# def runNAVE():\n","sub_path":"fisspy/analysis/ofe.py","file_name":"ofe.py","file_ext":"py","file_size_in_byte":15190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"519340945","text":"#!/usr/bin/env python3\n\nfrom .metric import SourceAndReferencedMetric\nfrom questeval.questeval_metric import QuestEval as QuestEvalMetric\nfrom logzero import logger\n\n\nclass QuestEval(SourceAndReferencedMetric):\n def __init__(self):\n # Default values\n self.task = \"summarization\"\n self.language = \"en\"\n self._this_task_is_available = True\n\n self.metric = QuestEvalMetric(\n task=self.task,\n language=self.language,\n )\n\n def support_caching(self):\n # We are using corpus-level QuestEval. \n return False\n\n def compute(self, cache, predictions, references, sources):\n # If task or language is different, we must change QA / QG models for questeval\n if predictions.task != self.task or predictions.language.alpha_2 != self.language:\n self.task = predictions.task\n self.language = predictions.language\n\n # Checking if the task is available\n task = predictions.task\n self._this_task_is_available = True\n if self.task not in self.metric.AVAILABLE_TASKS:\n self._this_task_is_available = False\n task = \"text2text\"\n logger.warning(\n \"This task is not available, QuestMetric is using the general text2text models.\"\n )\n\n self.metric = QuestEvalMetric(\n task=task,\n language=predictions.language.alpha_2,\n )\n\n # If the task was not available, then we pass references instead of sources\n local_sources, local_references = sources.untokenized, [[None]] * len(sources.untokenized)\n if self._this_task_is_available is False:\n local_sources, local_references = [None] * len(\n references.untokenized\n ), references.untokenized\n\n # Computing scores through one batched step\n scores = self.metric.corpus_questeval(\n hypothesis=predictions.untokenized,\n sources=local_sources,\n list_references=local_references,\n )\n\n formatted_scores = {}\n for sc, pred_id in zip(scores['ex_level_scores'], predictions.ids):\n formatted_score = {\"questeval\": sc}\n formatted_scores[pred_id] = formatted_score\n if cache is not None:\n cache_key = (self.__class__.__name__, predictions.filename, pred_id)\n cache[cache_key] = formatted_score\n\n return formatted_scores\n","sub_path":"gem_metrics/questeval.py","file_name":"questeval.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"81544861","text":"import glob\nfrom datetime import datetime\nfrom xlwt import Workbook\n\nwb = Workbook()\nsheet1 = wb.add_sheet('Sheet 1')\nsheet1.write(0,0,\"#\")\nsheet1.col(0).width = 1500\nsheet1.write(0,1,\"дата\")\nsheet1.col(1).width = 2500\nsheet1.write(0,2,\"время\")\nsheet1.col(2).width = 1500\nsheet1.write(0,3,\"Имя\")\nsheet1.col(3).width = 5500\nsheet1.write(0,4,\"телефон\")\nsheet1.col(4).width = 3500\nsheet1.write(0,6,\"Филиал\")\nsheet1.col(6).width = 4000\nsheet1.write(0,5,\"Адрес\")\nsheet1.col(5).width = 7000\nsheet1.write(0,7,\"оплата\")\nsheet1.col(7).width = 3000\nsheet1.write(0,8,\"Итого\")\nsheet1.col(8).width = 2500\n\n\nwith open(\"Cafe.txt\", 'r') as f:\n orderList = [line.split('\\n') for line in f.readlines()]\n i = 1\n for m in range(len(orderList)):\n if \"Заказ\" in str(orderList[m]):\n date = str(orderList[m-1][-2])\n sheet1.write(i,1, date.split()[-2][1:])\n sheet1.write(i,2, date.split()[-1][:-1])\n i +=1\n\n\n\nsearchfile = open(\"Cafe.txt\", \"r\")\ni = 1;\nfor line in searchfile:\n\n #if \"#\" in line: sheet1.write(i,0, \"\".join(line.split()[1][1:]))\n if line.find( \"Заказ\" ) == 0 or line.find( \"Заказ\" ) == 1:\n sheet1.write(i,0, float(\"\".join(line.split()[1][1:])))\n\n if line.find( \"Имя:\" ) == 0:\n sheet1.write(i,3, \"\".join(line.split()[1:]))\n\n if line.find( \"Телефон:\" ) == 0:\n if \"+\" in line:\n sheet1.write(i,4,(\"\".join(line.split()[1][1:])))\n else:\n sheet1.write(i,4,(\"\".join(line.split()[1:])))\n\n if line.find( \"Филиал: \" ) == 0:\n sheet1.write(i,6, \"\".join(line.split()[1:]))\n\n if line.find( \"Адрес:\" ) == 0:\n if \" ?? \" in line:\n sheet1.write(i,5,\" \".join(line.split()[2:]))\n else:\n sheet1.write(i,5,\" \".join(line.split()[1:]))\n\n #if \"Метод оплаты:\" in line: sheet1.write(i,4,\" \".join(line.split()[-1]))\n if line.find( \"Метод оплаты\" ) == 0:\n sheet1.write(i,7, \"\".join(line.split()[-1]))\n\n #if \"Итого\" in line: sheet1.write(i,5,\" \".join(line.split()[1:-1]))\n if line.find( \"Итого\" ) == 0:\n sheet1.write(i,8,float(\"\".join(line.split()[1:-1])))\n i = i + 1\n\n #if \"Метод оплаты:\" in line: i+=1 #incriments i so the next line will be used next\n\nsearchfile.close()\n\nwb.save('%s.xls'%datetime.now().strftime(\"%m-%d-%Y___%H-%M-%S\"))\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"413586379","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 11 09:54:15 2020\r\n\r\n@author: Grant\r\n\"\"\"\r\n\r\nclass Edge:\r\n\tdef __init__(self, src, dest, weight):\r\n\t\tself.src = src\r\n\t\tself.dest = dest\r\n\t\tself.weight = weight\r\n\r\nclass Node:\r\n\tdef __init__(self, value, weight):\r\n\t\tself.value = value\r\n\t\tself.weight = weight\r\n\r\nclass Graph:\r\n\tdef __init__(self, edges):\r\n\r\n\t\tself.adj = [None] * len(edges)\r\n\r\n\t\tfor i in range(len(edges)):\r\n\t\t\tself.adj[i] = []\r\n\r\n\t\tfor e in edges:\r\n\t\t\tnode = Node(e.dest, e.weight)\r\n\t\t\tself.adj[e.src].append(node)\r\n\r\nedges = [Edge(0, 1, 2), Edge(0, 2, 4), Edge(1, 3, 2),\r\n\t\tEdge(1, 2, 1), Edge(2, 4, 4), Edge(3, 4, 2)]\r\n\r\ngraph = Graph(edges)\r\n\r\ndef BellmanFord(graph, start):\r\n A = []\r\n\r\n n = 5\r\n\r\n for i in range(0, n + 1):\r\n A.append([])\r\n\r\n Initial_row = []\r\n\r\n starting_vertex = start\r\n\r\n for i in range(0, n):\r\n if i == starting_vertex:\r\n Initial_row.append(0)\r\n else:\r\n 
Initial_row.append(float('inf'))\r\n\r\n    A[0] = Initial_row\r\n    \r\n    B = []\r\n\r\n    n = 5\r\n\r\n    for i in range(0, n + 1):\r\n        B.append([])\r\n    \r\n    for i in range(0, n):\r\n        B[0].append(None)\r\n\r\n    for i in range(1, n + 1):\r\n        for v in range(0, n):\r\n            \r\n            case_1 = A[i - 1][v]\r\n            \r\n            w = []\r\n            e = []\r\n            \r\n            for src in range(len(graph.adj)):\r\n                for edge in graph.adj[src]:\r\n                    if edge.value == v:\r\n                        w.append(src)\r\n                        e.append(edge.weight)\r\n            \r\n            total = []\r\n            for b in range(len(w)):\r\n                total.append(A[i - 1][w[b]] + e[b])\r\n            \r\n            if total != []:\r\n                case_2 = min(total)\r\n                A[i].append(min(case_1, case_2))\r\n                \r\n                chosen_case = min(case_1, case_2)\r\n                \r\n                if chosen_case == case_1:\r\n                    B[i].append(B[i - 1][v])\r\n                else:\r\n                    chosen_index = total.index(min(total))\r\n                    vertex = w[chosen_index]\r\n                    B[i].append(vertex)\r\n            else:\r\n                A[i].append(case_1)\r\n                B[i].append(B[i - 1][v])\r\n        \r\n        if i == n:\r\n            if A[n - 1] != A[n]:\r\n                print(\"Negative Cycle Detected!\")\r\n                return\r\n    \r\n    nodes = [*range(0, n)]\r\n    min_distances = A[len(A) - 1]\r\n    print(nodes)\r\n    print(min_distances)\r\n    \r\n    print(B[len(B) - 1])\r\n\r\nBellmanFord(graph, 0)","sub_path":"BellmanFordAlgorithm.py","file_name":"BellmanFordAlgorithm.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"28422528","text":"\"\"\" An abstract super-class that gives the function to read a file\"\"\"\n\nfrom sensim.sensor import Sensor, checkNoneTime\n\n\nclass Importer(Sensor):\n    \"\"\" A super-class that gives the function to read a file \"\"\"\n\n    def __init__(self, filename, name=None):\n        Sensor.__init__(self, name)\n        self.filename = filename\n\n    def read_file(self):\n        with open(self.filename, \"r\") as opened_file:\n            return opened_file.read()\n\n    def _advanceTime(self):\n        if self.data:\n            self.data.popleft()\n            if self.data:\n                self.time = self.data[0].timestamp\n            else:\n                self.time = None\n        else:\n            self.time = None\n\n    @checkNoneTime\n    def _getNext(self):\n        if self.data:\n            return self.data[0]\n        else:\n            return None\n\n    @checkNoneTime\n    def _popNext(self):\n        return_val = self._getNext()\n        self._advanceTime()\n        return return_val\n","sub_path":"lab_2/sensim_project_Fernandez_Vavrille/sensim/sensor/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"133420439","text":"#\n#\n#\n#\n#\n# SOFTWARE HISTORY\n#\n# Date Ticket# Engineer Description\n# ------------ ---------- ----------- --------------------------\n# 03/09/11 njensen Initial Creation.\n# 08/15/13 2169 bkowal Decompress data read from the queue\n#\n#\n#\n\nimport time, sys\nimport threading\n\nimport dynamicserialize\n\nTIME_TO_SLEEP = 300\n\nclass ListenThread(threading.Thread):\n\n    def __init__(self, hostname, portNumber, topicName):\n        self.hostname = hostname\n        self.portNumber = portNumber\n        self.topicName = topicName\n        self.nMessagesReceived = 0\n        self.waitSecond = 0\n        self.stopped = False\n        threading.Thread.__init__(self)\n\n    def run(self):\n        from awips import QpidSubscriber\n        self.qs = QpidSubscriber.QpidSubscriber(self.hostname, self.portNumber, True)\n        self.qs.topicSubscribe(self.topicName, self.receivedMessage)\n\n    def receivedMessage(self, msg):\n        print(\"Received message\")\n        self.nMessagesReceived += 1\n        if self.waitSecond == 0:\n            fmsg = open('/tmp/rawMessage', 'w')\n            fmsg.write(msg)\n            fmsg.close()\n\n        while self.waitSecond < TIME_TO_SLEEP and 
not self.stopped:\n if self.waitSecond % 60 == 0:\n print(time.strftime('%H:%M:%S'), \"Sleeping and stuck in not so infinite while loop\")\n self.waitSecond += 1\n time.sleep(1)\n\n print(time.strftime('%H:%M:%S'), \"Received\", self.nMessagesReceived, \"messages\")\n\n def stop(self):\n print(\"Stopping\")\n self.stopped = True\n self.qs.close()\n\n\n\ndef main():\n print(\"Starting up at\", time.strftime('%H:%M:%S'))\n\n topic = 'edex.alerts'\n host = 'localhost'\n port = 5672\n\n thread = ListenThread(host, port, topic)\n try:\n thread.start()\n while True:\n time.sleep(3)\n except KeyboardInterrupt:\n pass\n finally:\n thread.stop()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"awips/test/testQpidTimeToLive.py","file_name":"testQpidTimeToLive.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95936733","text":"from pyspark import SparkContext, SparkConf\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.sql.types import DoubleType\nimport geopandas as gpd\n\ndef extract_data(spark,startyear=2000, endyear=2015):\n co2_data = spark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").csv(\n \"C:/Users/Stone/Desktop/Uni/BigData/Project/API_EN.ATM.CO2E.PC_DS2_en_csv_v2_1217665.csv\")\n return co2_data.select(\"Country Name\", str(startyear), str(endyear))\n\ndef remove_null(word):\n if \"null\" in word:\n word = \"0\"\n return word\n\n\ndef main():\n\n startyear=2000\n endyear=2019\n\n sc = SparkContext(\"local\", \"BigDataProject\")\n sc.setLogLevel(\"ERROR\")\n spark = SparkSession(sc)\n\n\n result = extract_data(spark,startyear=startyear,endyear=endyear)\n\n result = result.withColumn(str(startyear), result[str(startyear)].cast(DoubleType()))\n result = result.withColumn(str(endyear), result[str(endyear)].cast(DoubleType()))\n\n result = result.na.fill(0)\n\n result.show()\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"venv/bigdataprojekt.py","file_name":"bigdataprojekt.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"15110456","text":"import wget\nimport requests\nimport re\nimport csv\n\noffset = 5768\nnum_of_images = 7930-offset+1\n\n#num_of_images = 2000\n\n# res = requests.get('http://www.antiquities.org.il/t/item_en.aspx?CurrentPageKey=2070')\n# m = re.search('href=\"(images[^ ]*jpg)\"', res.text)\n# print(m.group(1))\n# text_file = open(\"Output11.txt\", \"w\")\n# text_file.write(res.text)\n# text_file.close()\n\n# res = requests.get('http://www.antiquities.org.il/t/item_en.aspx?CurrentPageKey=15')\n# m = re.search('href=\"(images[^ ]*jpg)\"', res.text)\n# print(m.group(1))\n# text_file = open(\"Output2.txt\", \"w\")\n# text_file.write(res.text)\n# text_file.close()\n\nobject_name = []\nperiod = []\nsite = []\nmaterial = []\nhieght = []\nlength = []\nwidth = []\nindex = []\ndiameter = []\n\ntry:\n for k in range(num_of_images):\n res = requests.get('http://www.antiquities.org.il/t/item_en.aspx?CurrentPageKey={}'.format(offset+k))\n # get image link\n m = re.findall('href=\"(images[^ ]*jpg)\"', res.text)\n set_image = set(m)\n print(set_image)\n if m is not None:\n for kk in range(len(set(m))):\n # download image\n wget.download('http://www.antiquities.org.il/t/{}'.format(set_image.pop()),out='{}_{}.jpg'.format((offset+k),kk))\n index.append(offset+k)\n else:\n index.append(-1)\n\n # object name\n #print(res.text)\n 
m = re.search('Object\\'s Name[ ]?: <\\/b>([^<]*)([^<]*)([^<]*)([^<]*)([^<]*) cm([^<]*) cm([^<]*) cm([^<]*) cm 9:\n my_sum2 = my_sum - 9\n totalone += my_sum2\n else: \n totaltwo += my_sum\n else:\n totalthree += number\n calculate_credit_card_number_check_digit = (totalone + totaltwo + totalthree)\n\n\ndef validate_credit_card_number_check_digit():\n if calculate_credit_card_number_check_digit % 10 == 0:\n print(\"valid card number entered\")\n else: \n print(\"invalid card number entered\")\n\nvalidate_credit_card_number_check_digit()","sub_path":"creditCard.py","file_name":"creditCard.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"339466043","text":"\"\"\"Helper functions for training Motion Transformation VAE on Human3.6M.\"\"\"\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nimport h36m_losses as losses\nimport H36M_BasePredModel as BasePredModel\nfrom nets import h36m_mtvae_factory as model_factory\n\nslim = tf.contrib.slim\n\nclass MTVAEPredModel(BasePredModel.BasePredModel):\n \"\"\"Defines MTVAE Prediction Model.\"\"\"\n \n def __init__(self, params):\n super(MTVAEPredModel, self).__init__(params)\n\n def get_model_fn(self, is_training, use_prior, reuse):\n params = self._params\n model_fn = model_factory.get_model_fn(self._params, is_training,\n use_prior, reuse)\n return model_fn\n\n def get_sample_fn(self, is_training, use_prior, reuse, output_length=None):\n return model_factory.get_sample_fn(self._params, is_training,\n use_prior, reuse, output_length)\n \n def get_loss(self, step, inputs, outputs):\n total_loss = tf.zeros(dtype=tf.float32, shape=[])\n loss_dict = dict()\n params = self._params\n\n if hasattr(params, 'keypoint_weight') and \\\n (params.keypoint_weight > 0):\n keypoint_loss = losses.get_keypoint_loss(\n inputs, outputs, params.max_length, params.keypoint_weight)\n loss_dict['post_keypoint_loss'] = keypoint_loss\n total_loss += keypoint_loss\n \n if hasattr(params, 'velocity_weight') and (params.velocity_weight > 0):\n assert params.cycle_model\n #\n curr_velocity_weight = (params.velocity_weight - (params.velocity_weight - params.velocity_start_weight) * (params.velocity_decay_rate)**tf.to_float(step))\n velocity_loss = losses.get_velocity_loss(\n inputs['last_landmarks'], inputs['fut_landmarks'], outputs['fut_landmarks'],\n inputs['fut_lens'], curr_velocity_weight * params.keypoint_weight,\n 'post_velocity_loss', params.velocity_length)\n velocity_loss += losses.get_velocity_loss(\n inputs['last_landmarks'], inputs['fut_landmarks'], outputs['cycle_fut_landmarks'],\n inputs['fut_lens'], curr_velocity_weight * params.keypoint_weight,\n 'prior_velocity_loss', params.velocity_length)\n loss_dict['velocity_loss'] = velocity_loss\n total_loss += velocity_loss\n\n if hasattr(params, 'kl_weight') and (params.kl_weight > 0):\n curr_kl_weight = (params.kl_weight - (params.kl_weight - params.kl_start_weight) * \n (params.kl_decay_rate)**tf.to_float(step)) \n kl_loss = losses.get_kl_loss(\n inputs, outputs, curr_kl_weight, params.kl_tolerance)\n loss_dict['kl_loss'] = kl_loss\n total_loss += kl_loss\n\n if hasattr(params, 'cycle_weight') and params.cycle_model and (params.cycle_weight > 0):\n cycle_loss = losses.get_cycle_loss(\n inputs, outputs, params.cycle_weight)\n loss_dict['cycle_loss'] = cycle_loss\n total_loss += cycle_loss\n \n slim.summaries.add_scalar_summary(\n total_loss, 'keypoint_mtvae_loss', prefix='losses')\n return total_loss, loss_dict\n\n 
def print_running_loss(self, global_step, loss_dict):\n params = self._params\n if params.keypoint_weight > 0:\n norm_keypoint_loss = loss_dict['post_keypoint_loss'] / params.keypoint_weight\n else:\n norm_keypoint_loss = 0\n\n if params.kl_weight > 0:\n curr_kl_weight = (params.kl_weight - (params.kl_weight - params.kl_start_weight) * \n (params.kl_decay_rate)**tf.to_float(global_step))\n norm_kl_loss = loss_dict['kl_loss'] / curr_kl_weight\n else:\n norm_kl_loss = 0\n \n if hasattr(params, 'velocity_weight') and params.velocity_weight > 0:\n curr_velocity_weight = (params.velocity_weight - (params.velocity_weight - params.velocity_start_weight) * (params.velocity_decay_rate)**tf.to_float(global_step))\n norm_velocity_loss = loss_dict['velocity_loss'] / (curr_velocity_weight * params.keypoint_weight)\n else:\n norm_velocity_loss = 0\n\n if hasattr(params, 'cycle_weight') and params.cycle_weight > 0:\n norm_cycle_loss = loss_dict['cycle_loss'] / params.cycle_weight\n else:\n norm_cycle_loss = 0\n \n def print_loss(step, keypoint_loss, kl_loss, velocity_loss, cycle_loss):\n print('[%06d]\\t[Keypoint %.3f]\\t[KL %.3f]\\t[VF %.3f]\\t[CYCLE %.3f]' % \\\n (step, keypoint_loss, kl_loss, velocity_loss, cycle_loss)) \n return 0\n ret_tmp = tf.py_func(\n func=print_loss,\n inp=[global_step, norm_keypoint_loss, norm_kl_loss, \n norm_velocity_loss, norm_cycle_loss],\n Tout=[tf.int64], name='print_loss')[0]\n ret_tmp = tf.to_int32(ret_tmp)\n return ret_tmp\n \n","sub_path":"H36M_MTVAEPredModel.py","file_name":"H36M_MTVAEPredModel.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"568625504","text":"import json\nimport fnmatch\nimport hashlib\nimport traceback\nfrom functools import wraps\n\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.conf import settings\n\nfrom channels import Group, DEFAULT_CHANNEL_LAYER\nfrom channels.asgi import channel_layers\nfrom channels.routing import Route, Include\nfrom channels.utils import name_that_thing\n\nfrom . 
import GROUP_NAME_GROUPS, GROUP_NAME_CHANNELS, GROUP_PREFIX\nfrom .settings import get_setting_value\n\n\nclass MessageJSONEncoder(DjangoJSONEncoder):\n\n def default(self, o):\n if isinstance(o, bytes):\n return o.decode('utf-8')\n return super(MessageJSONEncoder, self).default(o)\n\n\ndef send_debug(data, event, group=GROUP_NAME_GROUPS):\n Group(group).send({'text': json.dumps({'data': data, 'event': event}, cls=MessageJSONEncoder)})\n\n\ndef _get_route(route, prefix=None):\n if isinstance(route, Route):\n yield route.channels, route.consumer, route.filters, prefix\n elif isinstance(route, Include):\n for _route in route.routing:\n for route_params in _get_route(_route, route.prefixes):\n yield route_params\n\n\ndef get_routes(layer):\n for route_params in _get_route(channel_layers[layer].router.root):\n yield route_params\n\n\ndef layer_factory(base, alias):\n class DebugChannelLayer(base.channel_layer.__class__):\n\n def send(self, channel, message):\n if in_debug(channel):\n send_debug({'channel': channel, 'layer': alias, 'content': message},\n 'send', GROUP_NAME_CHANNELS)\n return super(DebugChannelLayer, self).send(channel, message)\n\n def group_add(self, group, channel):\n if in_debug(group, group=True):\n send_debug({'channel': channel, 'group': group, 'layer': alias}, 'add')\n return super(DebugChannelLayer, self).group_add(group, channel)\n\n def group_discard(self, group, channel):\n if in_debug(group, group=True):\n send_debug({'channel': channel, 'group': group, 'layer': alias}, 'discard')\n return super(DebugChannelLayer, self).group_discard(group, channel)\n\n def send_group(self, group, message):\n if in_debug(group, group=True):\n send_debug({'content': message, 'group': group, 'layer': alias}, 'send')\n return super(DebugChannelLayer, self).send_group(group, message)\n\n base.channel_layer = DebugChannelLayer(**getattr(settings, \"CHANNEL_LAYERS\", {})[alias].get(\"CONFIG\", {}))\n return base\n\n\ndef debug_decorator(consumer, alias):\n @wraps(consumer)\n def _consumer(message, *args, **kwargs):\n if in_debug(message.channel.name):\n name = name_that_thing(consumer)\n group = get_consumer_group(name)\n info = {\n 'layer': alias,\n 'channel': message.channel.name,\n 'consumer': name,\n 'call_args': args,\n 'call_kwargs': kwargs,\n 'message': message.content,\n }\n\n try:\n consumer(message, *args, **kwargs)\n except Exception:\n info['traceback'] = traceback.format_exc()\n send_debug(info, 'error', group)\n raise\n else:\n send_debug(info, 'run', group)\n return\n return consumer(message, *args, **kwargs)\n return _consumer\n\n\ndef in_debug(name, group=False):\n only = get_setting_value('ONLY_GROUPS' if group else 'ONLY_CHANNELS')\n exclude = get_setting_value('EXCLUDE_GROUPS' if group else 'EXCLUDE_CHANNELS')\n if only:\n if any(fnmatch.fnmatchcase(name, pattern) for pattern in only):\n return True\n return False\n if exclude:\n if not any(fnmatch.fnmatchcase(name, pattern) for pattern in exclude):\n return True\n\n\ndef md5(message):\n _md5 = hashlib.md5()\n _md5.update(message.encode())\n return _md5.hexdigest()\n\n\ndef get_consumer_group(consumer, layer=DEFAULT_CHANNEL_LAYER):\n return '.'.join([GROUP_PREFIX, layer, md5(consumer)])\n","sub_path":"channels_panel/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"243762794","text":"import os\nimport json\nfrom mist_json import MistJson\n\ndef make_clean_mist_samples(filepath, 
file_list):\n\n for idx, file in enumerate(file_list):\n with open(f'{filepath}/{file}') as file:\n file_data = json.load(file)\n mist = MistJson()\n mist.run(file_data, idx)\n\n\nif __name__ == '__main__':\n filepath = '../../MalwareTrainingSets/trainingSets/Benign'\n file_list = os.listdir(filepath)\n make_clean_mist_samples(filepath, file_list)","sub_path":"src/clean_mist.py","file_name":"clean_mist.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"262356810","text":"\"\"\"Plotting script for event detection.\"\"\"\n\nfrom cli_plotting import _get_parser\nfrom os.path import join as opj\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.pylab as pylab\n\nimport ev\n\nfontsize = 28\nparams = {'legend.fontsize': fontsize,\n 'axes.labelsize': fontsize,\n 'axes.titlesize': fontsize,\n 'xtick.labelsize': fontsize,\n 'ytick.labelsize': fontsize}\npylab.rcParams.update(params)\n# Global variables\nNRAND = 100\nTR = 0.83\nFIGSIZE = (45, 30)\nHISTORY = \"Deconvolution based on event-detection.\"\n# Font size for plots\nfont = {\"weight\": \"normal\", \"size\": 28}\nmatplotlib.rc(\"font\", **font)\n\n\ndef plot_comparison(\n rss_orig_sur,\n rssr_orig_sur,\n idxpeak_orig_sur,\n rss_fitt,\n rssr_fitt,\n idxpeak_fitt,\n rss_beta,\n rssr_beta,\n idxpeak_beta,\n rss_auc,\n rssr_auc,\n idxpeak_auc,\n ats,\n outdir,\n):\n \"\"\"\n Plot comparison of different RSS with vertical subplots.\n \"\"\"\n greymap = cm.get_cmap(\"Greys\")\n colors = greymap(np.linspace(0, 0.65, rssr_orig_sur.shape[1]))\n\n min_range = np.min(np.minimum(rss_orig_sur, rss_fitt)) * 0.9\n max_range = np.max(np.maximum(rss_orig_sur, rss_fitt)) * 1.1\n\n _, axs = plt.subplots(5, 1, figsize=FIGSIZE)\n for i in range(rssr_orig_sur.shape[1]):\n axs[0].plot(rssr_orig_sur[:, i], color=colors[i], linewidth=0.5)\n axs[0].plot(\n idxpeak_orig_sur,\n rss_orig_sur[idxpeak_orig_sur],\n \"r*\",\n label=\"orig_sur-peaks\",\n markersize=20,\n )\n axs[0].plot(\n rss_orig_sur,\n color=\"k\",\n linewidth=3,\n label=\"orig_sur\",\n )\n axs[0].set_ylim([min_range, max_range])\n axs[0].set_title(\"Original signal\")\n\n for i in range(rssr_orig_sur.shape[1]):\n axs[1].plot(rssr_fitt[:, i], color=colors[i], linewidth=0.5)\n axs[1].plot(\n idxpeak_fitt, rss_fitt[idxpeak_fitt], \"r*\", label=\"fitt-peaks\", markersize=20\n )\n axs[1].plot(rss_fitt, color=\"k\", linewidth=3, label=\"fitt\")\n axs[1].set_ylim([min_range, max_range])\n axs[1].set_title(\"Fitted signal\")\n\n for i in range(rssr_orig_sur.shape[1]):\n axs[2].plot(rssr_beta[:, i], color=colors[i], linewidth=0.5)\n axs[2].plot(\n idxpeak_beta, rss_beta[idxpeak_beta], \"r*\", label=\"beta-peaks\", markersize=20\n )\n axs[2].plot(rss_beta, color=\"k\", linewidth=3, label=\"beta\")\n axs[2].set_title(\"Betas\")\n\n for i in range(rssr_orig_sur.shape[1]):\n axs[3].plot(rssr_auc[:, i], color=colors[i], linewidth=0.5)\n axs[3].plot(\n idxpeak_auc, rss_auc[idxpeak_auc], \"r*\", label=\"AUC-peaks\", markersize=20\n )\n axs[3].plot(rss_auc, color=\"k\", linewidth=3, label=\"AUC\")\n axs[3].set_title(\"AUCs\")\n\n axs[4].plot(ats, label=\"ATS\", color=\"black\")\n axs[4].set_title(\"Activation time-series\")\n\n plt.legend()\n plt.savefig(opj(outdir, \"event_detection.png\"), dpi=300)\n\n\ndef plot_all(\n rss_orig_sur,\n idxpeak_orig_sur,\n rss_beta,\n idxpeak_beta,\n rss_fitt,\n 
idxpeak_fitt,\n outdir,\n):\n \"\"\"\n Plot all RSS lines on same figure.\n \"\"\"\n plt.figure(figsize=FIGSIZE)\n\n # Original signal\n rss_orig_norm = (rss_orig_sur - rss_orig_sur.min()) / (\n rss_orig_sur.max() - rss_orig_sur.min()\n )\n plt.plot(\n idxpeak_orig_sur,\n rss_orig_norm[idxpeak_orig_sur],\n \"r*\",\n \"linewidth\",\n 3,\n label=\"orig_sur-peaks\",\n )\n plt.plot(\n range(rss_orig_norm.shape[0]),\n rss_orig_norm,\n \"k\",\n \"linewidth\",\n 3,\n label=\"orig_sur\",\n )\n\n # Betas\n rss_beta_norm = (rss_beta - rss_beta.min()) / (rss_beta.max() - rss_beta.min())\n plt.plot(\n idxpeak_beta,\n rss_beta_norm[idxpeak_beta],\n \"g*\",\n \"linewidth\",\n 3,\n label=\"deconvolved_peaks\",\n )\n plt.plot(\n range(rss_beta_norm.shape[0]),\n rss_beta_norm,\n \"b\",\n \"linewidth\",\n 3,\n label=\"deconvolved\",\n )\n\n # Fitted signal\n rss_fitt_norm = (rss_fitt - rss_fitt.min()) / (rss_fitt.max() - rss_fitt.min())\n plt.plot(\n idxpeak_fitt,\n rss_fitt_norm[idxpeak_fitt],\n \"m*\",\n \"linewidth\",\n 3,\n label=\"fitted_peaks\",\n )\n plt.plot(\n range(rss_fitt_norm.shape[0]),\n rss_fitt_norm,\n \"y\",\n \"linewidth\",\n 3,\n label=\"fitted\",\n )\n plt.legend()\n plt.savefig(opj(outdir, \"event_detection_all.png\"), dpi=300)\n\n\ndef plot_ets_matrix(ets, outdir, sufix=\"\", dvars=None, enorm=None, peaks=None, vmin=-0.5, vmax=0.5):\n \"\"\"\n Plots edge-time matrix\n \"\"\"\n if dvars is not None and enorm is not None:\n # widths = [1]\n # heights = [2, 1, 1]\n # gs = dict(width_ratios=widths, height_ratios=heights)\n # fig, axs = plt.subplots(3, 1, figsize=FIGSIZE,gridspec_kw=gs)\n # im = axs[0].imshow(ets.T, vmin=vmin, vmax=vmax, cmap=\"bwr\", aspect=\"auto\")\n # axs[0].set_title(\"Edge-time series\")\n # axs[0].set_ylabel(\"Edge-edge connections\")\n # fig.colorbar(im, orientation=\"vertical\", ax=axs[0]) # ax=axs.ravel().tolist()\n # axs[1].plot(dvars)\n # axs[1].set_title(\"DVARS\")\n # axs[2].plot(enorm)\n # axs[2].set_title(\"ENORM\")\n # axs[2].set_xlabel(\"Time (TR)\")\n fig = plt.subplots(figsize=FIGSIZE)\n ax0 = plt.subplot(111)\n divider = make_axes_locatable(ax0)\n ax1 = divider.append_axes(\"bottom\", size=\"25%\", pad=1)\n ax2 = divider.append_axes(\"bottom\", size=\"25%\", pad=1)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.08)\n im = ax0.imshow(ets.T, vmin=vmin, vmax=vmax, cmap=\"bwr\", aspect=\"auto\")\n ax0.set_ylabel(\"Edge-edge connections\")\n cb = plt.colorbar(im, orientation=\"vertical\", ax=ax0, cax=cax) # ax=axs.ravel().tolist()\n dvars[1] = np.mean(dvars)\n ax1.plot(dvars)\n ax1.set_title(\"DVARS\")\n ax1.margins(0, 0)\n for i in peaks:\n ax1.axvspan(i, i+1, facecolor='b', alpha=0.5)\n ax2.axvspan(i, i+1, facecolor='b', alpha=0.5)\n ax2.plot(enorm)\n ax2.set_title(\"ENORM\")\n ax2.set_xlabel(\"Time (TR)\")\n ax2.margins(0, 0)\n plt.savefig(opj(outdir, f\"ets{sufix}.png\"), dpi=300)\n else:\n fig, axs = plt.subplots(1, 1, figsize=FIGSIZE)\n plt.imshow(ets.T, vmin=vmin, vmax=vmax, cmap=\"bwr\", aspect=\"auto\")\n plt.title(\"Edge-time series\")\n plt.xlabel(\"Time (TR)\")\n plt.ylabel(\"Edge-edge connections\")\n plt.colorbar()\n plt.savefig(opj(outdir, f\"ets{sufix}.png\"), dpi=300)\n\n\ndef main(argv=None):\n \"\"\"\n Main function to perform event detection and plot results.\n \"\"\"\n options = _get_parser().parse_args(argv)\n kwargs = vars(options)\n\n # Global variables\n SUBJECT = kwargs[\"subject\"][0]\n NROIS = kwargs[\"nROI\"][0]\n # Paths to files\n MAINDIR = kwargs[\"dir\"][0]\n TEMPDIR = opj(MAINDIR, f\"temp_{SUBJECT}_{NROIS}\")\n 
ORIGDIR = \"/bcbl/home/public/PARK_VFERRER/PREPROC/\" + SUBJECT + \"/func/task-restNorm_acq-MB3_run-01\"\n ats_name = \"pb06.\" + SUBJECT + \".denoised_no_censor_ATS_abs_95.1D\"\n ATS = np.loadtxt(opj(MAINDIR, ats_name))\n ATLAS = opj(TEMPDIR, \"atlas.nii.gz\")\n DATAFILE = opj(MAINDIR, f\"pb06.{SUBJECT}.denoised_no_censor.nii.gz\")\n BETAFILE = opj(MAINDIR, f\"pb06.{SUBJECT}.denoised_no_censor_beta_95.nii.gz\")\n FITTFILE = opj(MAINDIR, f\"pb06.{SUBJECT}.denoised_no_censor_fitt_95.nii.gz\")\n AUCFILE = opj(MAINDIR, f\"{SUBJECT}_AUC_{NROIS}.nii.gz\")\n # Perform event detection on BETAS\n print(\"Performing event-detection on betas...\")\n (\n ets_beta,\n rss_beta,\n rssr_beta,\n idxpeak_beta,\n etspeaks_beta,\n mu_beta,\n _,\n _,\n _,\n ) = ev.event_detection(BETAFILE, ATLAS, opj(TEMPDIR, \"surrogate_\"), \"_beta_95\")\n\n # Perform event detection on ORIGINAL data\n print(\"Performing event-detection on original data...\")\n (\n ets_orig_sur,\n rss_orig_sur,\n rssr_orig_sur,\n idxpeak_orig_sur,\n etspeaks_orig_sur,\n mu_orig_sur,\n _,\n _,\n _,\n ) = ev.event_detection(DATAFILE, ATLAS, opj(TEMPDIR, \"surrogate_\"))\n\n # Perform event detection on FITTED signal\n print(\"Performing event-detection on fitted signal...\")\n (\n ets_fitt,\n rss_fitt,\n rssr_fitt,\n idxpeak_fitt,\n etspeaks_fitt,\n mu_fitt,\n _,\n _,\n _,\n ) = ev.event_detection(FITTFILE, ATLAS, opj(TEMPDIR, \"surrogate_\"), \"_fitt_95\")\n\n # Perform event detection on AUC\n print(\"Performing event-detection on AUC...\")\n (\n ets_auc,\n rss_auc,\n rssr_auc,\n idxpeak_auc,\n etspeaks_AUC,\n mu_AUC,\n ets_auc_denoised,\n idx_u,\n idx_v,\n ) = ev.event_detection(AUCFILE, ATLAS, opj(TEMPDIR, \"surrogate_AUC_\"))\n\n print(\"Making plots...\")\n # Plot comparison of rss time series, null, and significant peaks for\n # original, betas, fitted, AUC and ATS\n plot_comparison(\n rss_orig_sur,\n rssr_orig_sur,\n idxpeak_orig_sur,\n rss_fitt,\n rssr_fitt,\n idxpeak_fitt,\n rss_beta,\n rssr_beta,\n idxpeak_beta,\n rss_auc,\n rssr_auc,\n idxpeak_auc,\n ATS,\n MAINDIR,\n )\n\n # Plot all rss time series, null, and significant peaks in one plot\n plot_all(\n rss_orig_sur, idxpeak_orig_sur, rss_beta, idxpeak_beta, rss_fitt,\n idxpeak_fitt, MAINDIR\n )\n\n print(\"Plotting original, AUC, and AUC-denoised ETS matrices...\")\n # Plot ETS matrix of original signal\n DVARS = np.loadtxt(opj(ORIGDIR, SUBJECT + \"_dvars.1D\"))\n ENORM = np.loadtxt(opj(ORIGDIR, SUBJECT + \"_Motion_enorm.1D\"))\n plot_ets_matrix(ets_orig_sur, MAINDIR, \"_original\", DVARS, ENORM, idxpeak_auc)\n\n # Plot ETS and denoised ETS matrices of AUC\n plot_ets_matrix(ets_auc, MAINDIR, \"_AUC_original\", DVARS, ENORM, idxpeak_auc)\n plot_ets_matrix(ets_auc_denoised, MAINDIR, \"_AUC_denoised\", DVARS, ENORM, idxpeak_auc)\n\n # Save RSS time-series as text file for easier visualization on AFNI\n rss_out = np.zeros(rss_auc.shape)\n rss_out[idxpeak_auc] = rss_auc[idxpeak_auc]\n np.savetxt(opj(MAINDIR, f\"{DATAFILE[:-7]}_rss.1D\"), rss_out)\n\n # Perform debiasing based on thresholded edge-time matrix\n beta, _ = ev.debiasing(\n DATAFILE, ATLAS, ets_auc_denoised, idx_u, idx_v, TR, MAINDIR, HISTORY\n )\n\n print(\"Plotting edge-time matrix of ETS-based deconvolution.\")\n denoised_beta_ets, _, _ = ev.calculate_ets(beta, beta.shape[1])\n plot_ets_matrix(denoised_beta_ets, MAINDIR, \"_beta_denoised\",DVARS, ENORM, idxpeak_auc)\n\n print(\"THE END\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"216215519","text":"from django.urls import path\nfrom shop import views\n\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('category//', views.category, name='category'),\n    path('item//', views.item_detail, name='item'),\n    path('item//add_review/', views.add_review, name='add_review'),\n    path('item//to_cart/', views.to_cart, name='to_cart'),\n    path('cart/', views.cart_view, name='cart_view'),\n    path('login/', auth_views.LoginView.as_view(), name='login'),\n    path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n    path('create_order/', views.create_order, name='create_order')\n]\n\n\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"222315423","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1427932250.613265\n_enable_loop = True\n_template_filename = '/Users/jeffreymccraney/test_dmp/homepage/templates/festival.html'\n_template_uri = 'festival.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\n\n\ndef _mako_get_namespace(context, name):\n    try:\n        return context.namespaces[(__name__, name)]\n    except KeyError:\n        _mako_generate_namespaces(context)\n    return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n    pass\ndef _mako_inherit(template, context):\n    _mako_generate_namespaces(context)\n    return runtime._inherit_from(context, 'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n    __M_caller = context.caller_stack._push_frame()\n    try:\n        __M_locals = __M_dict_builtin(pageargs=pageargs)\n        def content():\n            return render_content(context._locals(__M_locals))\n        Areas = context.get('Areas', UNDEFINED)\n        __M_writer = context.writer()\n        __M_writer('\\n\\n')\n        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n            context['self'].content(**pageargs)\n        \n\n        __M_writer('\\n\\n\\n')\n        return ''\n    finally:\n        context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n    __M_caller = context.caller_stack._push_frame()\n    try:\n        def content():\n            return render_content(context)\n        Areas = context.get('Areas', UNDEFINED)\n        __M_writer = context.writer()\n        __M_writer('\\n\\n \\tColonial Heritage Foundation Information\\n \\tClick on the name of the area in order to reveal a list of Items to be sold there!\\n\\n \\t\\n\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n        for area in Areas:\n            __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n        __M_writer('\\tArea NameDescriptionArea Location #')\n        __M_writer(str( area.name ))\n        __M_writer('')\n        __M_writer(str( area.description ))\n        __M_writer('')\n        __M_writer(str( area.placeNumber ))\n        __M_writer('
\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"line_map\": {\"64\": 21, \"35\": 1, \"70\": 64, \"40\": 23, \"46\": 3, \"59\": 16, \"53\": 3, \"54\": 14, \"55\": 15, \"56\": 16, \"57\": 16, \"58\": 16, \"27\": 0, \"60\": 17, \"61\": 17, \"62\": 18, \"63\": 18}, \"filename\": \"/Users/jeffreymccraney/test_dmp/homepage/templates/festival.html\", \"source_encoding\": \"ascii\", \"uri\": \"festival.html\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"homepage/cached_templates/templates/festival.html.py","file_name":"festival.html.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"441226803","text":"import numpy as np\nimport random\nimport time\nfrom plot_util import plot_progress\nfrom logger import Logger\nfrom data_util.experiment_data_classes import Parameters\n\n\ndef train(width: int, length: int, params: Parameters, environment, visualize=False, plot=False, plot_interval=10, plot_moving_avg_period=100):\n q_table = np.zeros((width * length, 4))\n\n exploration_rate = params.start_exploration_rate\n for episode in range(params.num_episodes):\n if episode % 1000 == 0:\n Logger.debug(\"EPISODE: \", episode)\n state = environment.reset_agent()\n done = False\n rewards_current_episode = 0\n max_reward_current_episode = 0\n\n for step in range(params.max_steps_per_episode):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = random.choice(\n environment.get_agent_possible_actions()\n )\n new_state, reward = environment.agent_perform_action(action)\n q_table[state, action] = q_table[state, action] * (1 - params.learning_rate) + params.learning_rate * (\n reward + params.discount_rate * np.max(q_table[new_state, :]))\n\n state = new_state\n rewards_current_episode += reward\n if max_reward_current_episode < reward:\n max_reward_current_episode = reward\n\n if visualize:\n # environment.clear()\n # environment.render()\n environment.redraw_agent()\n time.sleep(0.04)\n\n exploration_rate = params.min_exploration_rate + (params.max_exploration_rate - params.min_exploration_rate) * np.exp(\n -params.exploration_decay_rate * episode)\n # print(\"Exploration Rate: \" + exploration_rate.__str__())\n # print(max_reward_current_episode)\n params.rewards_all_episodes.append(rewards_current_episode)\n params.max_rewards_all_episodes.append(max_reward_current_episode)\n if episode % plot_interval == 0:\n plot_progress(params.rewards_all_episodes, exploration_rate, plot_moving_avg_period)\n\n return q_table, params\n","sub_path":"rl_algorithms/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"122170222","text":"import logging\nimport os\nimport re\nimport tempfile\nimport unittest\n\nimport cfn_lambda_extractor\n\nclass Test(unittest.TestCase):\n def setUp(self):\n d = os.path.dirname(os.path.realpath(__file__))\n self.testdata_dir = d + \"/testdata/\"\n test_template = self.testdata_dir + \"test_template.yaml\"\n self.data = cfn_lambda_extractor.load_input_file(test_template)\n\n def test_template_extraction(self):\n values = {\"ValueToSub1\": \"test1234\",\"ValueToSub2\": \"test4321\"}\n fns = cfn_lambda_extractor.extract_functions(self.data, values)\n\n efn1 = open(self.testdata_dir + \"expected_output_fn1.py\")\n 
expected_output_fn1 = efn1.read()\n        efn1.close()\n\n        efn2 = open(self.testdata_dir + \"expected_output_fn2.py\")\n        expected_output_fn2 = efn2.read()\n        efn2.close()\n\n        self.assertEqual(\"\".join(fns[\"1\"]), expected_output_fn2.rstrip('\\n'))\n        self.assertEqual(\"\".join(fns[\"0\"]), expected_output_fn1.rstrip('\\n'))\n\n    def test_value_not_provided(self):\n        with self.assertRaises(Exception) as e:\n            cfn_lambda_extractor.extract_functions(self.data, {})\n        err = str(e.exception)\n        self.assertTrue(re.match(\"Value 'ValueToSub.' not provided.\", err))\n\n    def test_no_resources(self):\n        with self.assertRaises(Exception) as e:\n            cfn_lambda_extractor.extract_functions(\"\", {})\n        self.assertEqual(str(e.exception), \"No Resources in template.\")\n\n    def test_parse_input(self):\n        self.assertEqual(cfn_lambda_extractor.parse_csv_input_values(\"a=1,b=2\"), {\"a\":'1', \"b\":'2'})\n\n    def test_replace_values_in_line(self):\n        s = \"name = ${AccountId}-${Region}\"\n        v = {\"AccountId\": \"123443211234\", \"Region\": \"us-west-2\"}\n        self.assertEqual(cfn_lambda_extractor.replace_values_in_line(s, v), \"name = 123443211234-us-west-2\")\n\nlogging.basicConfig(level=logging.CRITICAL)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"cfn_lambda_extractor/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"470446660","text":"import os\nfrom google.appengine.api import users\n\nimport logging\ndef AuthenticateUser(self):\n    user = users.get_current_user()\n    if not user:\n        login_url = users.create_login_url(self.request.url)\n        greeting = '<a href=\"{}\">Sign in</a>'.format(login_url)\n        self.response.write('<html><body>{}</body></html>'.format(greeting))\n        return False\n\n    else:\n        return user\n\n\ndef django_template(file_name):\n    logging.error(os.path.join(os.path.dirname(__file__), \"templates\", file_name))\n    return os.path.join(os.path.dirname(__file__), \"templates\", file_name)\n","sub_path":"lofty-buttress-136214/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"115875546","text":"\"\"\"\r\nWrite a Python program to check whether\r\nseveral entered variables have the same value.\r\n\"\"\"\r\n\r\n\r\nx=int(input(\"Enter the first number\"))\r\ny=int(input(\"Enter the second number\"))\r\na=int(input(\"Enter the third number\"))\r\n\r\nif x==y==a:\r\n    print(\"All of the numbers have the same value!\")\r\nelse:\r\n    print(\"Not all of the numbers have the same value!\")","sub_path":"6uzdevums.py","file_name":"6uzdevums.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"391476760","text":"import csv\n\n# Required task 1. Read the data from the spreadsheet\n\nwith open('sales.csv', 'r') as csv_file:\n    spreadsheet = csv.DictReader(csv_file)\n\n# Required task 2. Collect all of the sales from each month into a single list\n    total_sales = []\n\n    for row in spreadsheet:\n        monthly_sales = row['sales']\n        total_sales.append(int(monthly_sales))\n\n# Required task 3. 
Output the total sales across all months\n\nprint(f'The total sales across all {len(total_sales)} months are {sum(total_sales)}')\n\n","sub_path":"Project_Spreadsheet_Analysis.py","file_name":"Project_Spreadsheet_Analysis.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"476327043","text":"# -*- coding: utf-8 -*-\n\n# tooltip.py\n\nimport sys\n#from PyQt4.QtGui import *\n#from PyQt4 import QtCore\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n\nclass Tooltip(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n \n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Tooltip')\n \n self.setToolTip('This is a QWidget widget')\n QToolTip.setFont(QFont('OldEnglish', 10))\n\n\n\napp = QApplication(sys.argv)\ntooltip = Tooltip()\ntooltip.show()\nsys.exit(app.exec())\n\n\n","sub_path":"PyXGui/showcase/sample/tooltip.py","file_name":"tooltip.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"348418028","text":"#!/usr/bin/python\n\"\"\"STATIC SETTINGS\nsettings for static content\nThese are default somewhat sane values that\nalmost certainly will not work off-the shelf and\nshould be modified \"\"\"\n\nimport sys,os\nSTATIC_ROOT = '/data/www/static/'\nSTATIC_URL = 'http://127.0.0.1/static/'\n\n#WEBUSER necessesary for uploads??\nWEBUSER = 'backstage'\n\nSTATICFILES_DIRS = []\n\n#MEDIA_ROOT set within site settings.py\nMEDIA_ROOT_BASE = '/data/www/content/site/'\nMEDIA_URL = 'http://127.0.0.1/media/'\n\nADMIN_MEDIA_PREFIX = STATIC_URL + \"grappelli/\"\nAUTOCOMPLETE_MEDIA_PREFIX='/media/autocomplete'\n\n","sub_path":"backstage/settings/static_settings.py","file_name":"static_settings.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"1524708","text":"#########################################################################\n# Copyright 2011 Cloud Sidekick\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#########################################################################\n\nimport sys\n\nif sys.version_info < (2, 7):\n import catoxml.etree.ElementTree as ET\nelse:\n try:\n import xml.etree.cElementTree as ET\n except (AttributeError, ImportError):\n import xml.etree.ElementTree as ET\n\ndef vmw_list_images(TE, step):\n\n import catosphere\n instance_uuid, endpoint_name = TE.get_command_params(step.command, \"instance_uuid\", \"endpoint_name\")[:]\n instance_uuid = TE.replace_variables(instance_uuid)\n endpoint_name = TE.replace_variables(endpoint_name)\n\n cloud = TE.get_cloud_connection(endpoint_name)\n\n values = []\n root = ET.fromstring(step.command)\n filters = root.findall(\"filters/filter\")\n if filters:\n the_filter = {}\n for f in filters:\n name = f.findtext(\"./name\", \"\")\n if len(name):\n values = f.findall(\"./values/value\")\n value_list = 
[]\n                for v in values:\n                    # TE.logger.debug(\"value is %s\" % (v.findtext(\".\", \"\")))\n                    value_list.append(v.findtext(\".\", \"\"))\n                the_filter[name] = value_list\n    else:\n        the_filter = None\n    \n    instances = cloud.server.list_instances(instanceUuid=instance_uuid, filter=the_filter)\n    # TE.logger.info(instances)\n\n    results = []\n    msg = \"%s\\n\" % (catosphere.get_all_property_names())\n    if len(instances):\n\n        for i in instances:\n            prop_list = i.get_properties()\n            results.append(prop_list)\n            msg = \"%s\\n%s\" % (msg, prop_list)\n\n    if len(results):\n        variables = TE.get_node_list(step.command, \"step_variables/variable\", \"name\", \"position\")\n        TE.process_list_buffer(results, variables)\n\n    TE.insert_audit(\"vmw_list_images\", msg, \"\")\n\ndef vmw_clone_image(TE, step):\n\n    instance_uuid, endpoint_name, name, folder, resourcepool, power_on = TE.get_command_params(step.command,\n        \"instance_uuid\", \"endpoint_name\", \"name\", \"folder\", \"resourcepool\", \"power_on\")[:]\n    instance_uuid = TE.replace_variables(instance_uuid)\n    endpoint_name = TE.replace_variables(endpoint_name)\n    TE.logger.debug(\"endpoint name = %s\" % (endpoint_name))\n    name = TE.replace_variables(name)\n    folder = TE.replace_variables(folder)\n    resourcepool = TE.replace_variables(resourcepool)\n    power_on = TE.replace_variables(power_on)\n\n    if len(instance_uuid) == 0:\n        raise Exception(\"InstanceUUID field is required for VMware Clone command\")\n    if len(name) == 0:\n        raise Exception(\"Name field is required for VMware Clone command\")\n    cloud = TE.get_cloud_connection(endpoint_name)\n    \n    instance = cloud.server.list_instances(instanceUuid=instance_uuid)[0]\n    result = instance.clone(name=name)\n    msg = \"VMware Image Clone %s\\nOK\" % (instance_uuid)\n    TE.insert_audit(\"vmw_clone_image\", msg, \"\")\n    TE.logger.info(result)\n    \ndef vmw_power_on_image(TE, step):\n    vmw_power_image(TE, step.command, \"on\")\n    \ndef vmw_power_off_image(TE, step):\n    vmw_power_image(TE, step.command, \"off\")\n    \ndef vmw_power_image(TE, command, on_off):\n\n    instance_uuid, endpoint_name = TE.get_command_params(command, \"instance_uuid\", \"endpoint_name\")[:]\n    instance_uuid = TE.replace_variables(instance_uuid)\n    endpoint_name = TE.replace_variables(endpoint_name)\n\n    if len(instance_uuid) == 0:\n        raise Exception(\"InstanceUUID field is required for VMware Power commands\")\n\n    cloud = TE.get_cloud_connection(endpoint_name)\n    \n    if on_off == \"on\":\n        result = cloud.server.power_on_vm(instance_uuid)\n        msg = \"VMware Image Power On %s\\nOK\" % (instance_uuid)\n        TE.insert_audit(\"vmw_power_on_image\", msg, \"\")\n    elif on_off == \"off\":\n        result = cloud.server.power_off_vm(instance_uuid)\n        msg = \"VMware Image Power Off %s\\nOK\" % (instance_uuid)\n        TE.insert_audit(\"vmw_power_off_image\", msg, \"\")\n\n    TE.logger.info(result)\n","sub_path":"extensions/vmware/vmware.py","file_name":"vmware.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"35800375","text":"# -*- coding: utf-8 -*-\n# START HERE BELOW\nprimo = int(input(\"Enter the number: \"))\neh_primo = primo > 1\nfor i in range(2, primo):\n    if primo % i == 0:\n        eh_primo = False\n        break\nif eh_primo:\n    print(\"Prime\")\nelse:\n    print(\"Not prime\")\n","sub_path":"moodledata/vpl_data/59/usersdata/239/30811/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"72826454","text":"#!/usr/bin/python3\n\"\"\"LIFOCache module\n\"\"\"\nfrom base_caching import BaseCaching\n\n\nclass LIFOCache(BaseCaching):\n \"\"\"LIFOCache class\n\n Args:\n BaseCaching (class): Basic class for this class\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.__keys = []\n\n def put(self, key, item):\n \"\"\"put item into cache_data with LIFO algorithm\n\n Args:\n key ([type]): key of dictionary\n item ([type]): item to insert in dictionary\n \"\"\"\n if len(self.cache_data) == self.MAX_ITEMS and key not in self.__keys:\n discard = self.__keys.pop()\n del self.cache_data[discard]\n print('DISCARD: {}'.format(discard))\n if key and item:\n self.__keys.append(key)\n self.cache_data[key] = item\n\n def get(self, key):\n \"\"\"get value of cache_data dictionary\n\n Args:\n key ([type]): key to search into cache_data\n \"\"\"\n if not key or key not in self.cache_data:\n return None\n return self.cache_data[key]\n","sub_path":"0x03-caching/2-lifo_cache.py","file_name":"2-lifo_cache.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"377335871","text":"from django.shortcuts import render\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponse, JsonResponse\n# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom chat.serializers import UserListSerializer, UserCreateSerializer, MessageSerializer, LastMessageIdSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import status, permissions\nfrom rest_framework.reverse import reverse\nfrom chat.models import Message\n\nimport time\n\n\ndef client(request):\n\treturn render(request, \"client.html\", {})\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny, ))\ndef api_root(request):\n\treturn Response({\n\t\t\t\"users\": reverse('user-list', request=request),\n\t\t\t\"messages\": reverse('message-list', request=request)\n\t\t}) \n\n@api_view(['GET', 'POST'])\n@permission_classes((permissions.AllowAny, ))\ndef user_list(request):\n\tif request.method == 'GET':\n\t\tusers = User.objects.all()\n\t\tserializer = UserListSerializer(users, many=True)\n\t\treturn Response(serializer.data)\n\telif request.method == 'POST':\n\t\tprint(request.data)\n\t\tserializer = UserCreateSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'DELETE'])\n@permission_classes((permissions.IsAdminUser, ))\ndef user_del(request, pk):\n\ttry:\n\t\tuser = User.objects.get(pk=pk)\n\texcept User.DoesNotExist:\n\t\treturn Response(status=status.HTTP_404_NOT_FOUND)\n\n\tif request.method == 'GET':\n\t\tserializer = UserCreateSerializer(user)\n\t\treturn Response(serializer.data)\n\telif request.method == 'DELETE':\n\t\tuser.delete()\n\t\treturn Response(status=status.HTTP_204_NO_CONTENT)\n\n\n# Polling\nPOLLING_INTERVAL = 15\n\n@api_view(['POST'])\n@permission_classes((permissions.AllowAny,))\ndef poll(request):\n\tprint(request.data)\n\tmid_serializer = LastMessageIdSerializer(data=request.data)\n\tif mid_serializer.is_valid():\n\t\tlast_mid = request.data.get('last_mid', 1)\n\t\tfor _ in range(POLLING_INTERVAL):\n\t\t\tmessages = 
Message.objects.get_messages(int(last_mid))\n\t\t\tmcount = messages.count()\n\t\t\tif mcount > 30:\n\t\t\t\tmessages = messages[mcount - 30:]\n\t\t\tif mcount == 0:\n\t\t\t\ttime.sleep(1)\n\t\t\t\tcontinue\n\n\t\t\tserializer = MessageSerializer(messages, many=True)\n\t\t\tlast_mid = max(m.pk for m in messages)\n\t\t\treturn Response({\n\t\t\t\t\t\"last_mid\": last_mid,\n\t\t\t\t\t\"messages\": serializer.data\n\t\t\t\t})\t\n\t\treturn Response({\n\t\t\t\t'message': \"OK\"\n\t\t\t\t})\n\treturn Response(mid_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view([\"POST\"])\n@permission_classes((permissions.IsAuthenticated, ))\ndef send(request):\n\tserializer = MessageSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save(user=request.user)\n\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED)\n\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\n@permission_classes((permissions.IsAuthenticated, ))\ndef message_list(request):\n\tmessages = Message.objects.all()\n\tserializer = MessageSerializer(messages, many=True)\n\treturn Response(serializer.data)\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny, ))\ndef message_history(request):\n\tdate = request.GET.get('date', '')\n\ttry:\n\t\tmessages = Message.objects.message_history(date)\n\texcept ValidationError:\n\t\treturn Response(status=status.HTTP_400_BAD_REQUEST)\n\n\tserializer = MessageSerializer(messages, many=True)\n\treturn Response(serializer.data)\n","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"362744155","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'CHI': 'chicago.csv',\n              'NYC': 'new_york_city.csv',\n              'DC': 'washington.csv' }\n\ndef get_filters():\n    \"\"\"\n    Asks user to specify a city, month, and day to analyze.\n\n    Returns:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    \"\"\"\n\n    print('\\nHello! Let\\'s explore some US bikeshare data!')\n\n    cities = ('CHI', 'NYC', 'DC')\n    months = ('all', 'january', 'february', 'march', 'april', 'may', 'june')\n    days = ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n\n    while True:\n        city = (input(\"What city would you like to explore? Choose from: CHI, NYC, DC. \")).upper()\n        if city not in cities:\n            print('Sorry, this is not an option. Try again:' )\n        else:\n            break\n\n    while True:\n        month = (input(\"Are you interested in all data or filtered by month? Choose from all, january, february, march, april, may, june. \")).lower()\n        if month not in months:\n            print('Sorry, this is not an option. Try again:' )\n        else:\n            break\n\n    while True:\n        day = (input(\"Are you interested in all data or filtered by day? Choose from all, monday, tuesday, wednesday, thursday, friday, saturday, sunday. \")).lower()\n        if day not in days:\n            print('Sorry, this is not an option. Try again:' )\n        else:\n            break\n\n    print('-'*40)\n    print('Here we go!!')\n    print('-'*40)\n    return city, month, day\n\n\n\ndef load_data(city, month, day):\n    \"\"\"\n    Loads data for the specified city and filters by month and day if applicable.\n\n    Args:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    Returns:\n        df - Pandas DataFrame containing city data filtered by month and day\n    \"\"\"\n    # load data file into a dataframe\n    df = pd.read_csv(CITY_DATA[city])\n\n    # convert the Start Time column to datetime\n    df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n    # extract month and day of week from Start Time to create new columns\n    df['month'] = df['Start Time'].dt.month\n    df['day_of_week'] = df['Start Time'].dt.day_name()\n\n    # filter by month if applicable\n    if month != 'all':\n        # use the index of the months list to get the corresponding int\n        months = ['january', 'february', 'march', 'april', 'may', 'june']\n        month = months.index(month) + 1\n\n        # filter by month to create the new dataframe\n        df = df[df['month'] == month]\n\n    # filter by day of week if applicable\n    if day != 'all':\n        # filter by day of week to create the new dataframe\n        df = df[df['day_of_week'] == day.title()]\n\n    return df\n\n\ndef time_stats(df):\n    \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n    print('\\nCalculating The Most Frequent Times of Travel...\\n')\n\n    # output: most common month\n    # output: most common day of week\n    # output: most common start hour\n\n    # convert the Start Time column to datetime\n    df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n    df['month'] = df['Start Time'].dt.month\n    common_month = df['month'].mode()[0]\n    print('Most Common Start Month: {}'.format(common_month))\n\n    df['day'] = df['Start Time'].dt.day_name()\n    common_day = df['day'].mode()[0]\n    print('Most Common Start Day: {}'.format(common_day))\n\n    df['hour'] = df['Start Time'].dt.hour\n    common_hour = df['hour'].mode()[0]\n    print('Most Common Start Hour: {}'.format(common_hour))\n\n\n    print('-'*40)\n\n\ndef station_stats(df):\n    \"\"\"Displays statistics on the most popular stations and route.\"\"\"\n\n    print('\\nCalculating The Most Popular Stations and Route...\\n')\n\n\n    # output: most commonly used start station\n    # output: most commonly used end station\n    # output: most frequent combination of start station and end station\n\n    popular_start = df['Start Station'].mode()[0]\n    print('The most popular start station is: {}'.format(popular_start))\n\n    popular_end = df['End Station'].mode()[0]\n    print('The most popular end station is: {}'.format(popular_end))\n\n    # create new Route column\n    df['Route'] = df['Start Station'] + \" ...TO... 
\" + df['End Station']\n popular_route = df['Route'].mode()[0]\n print('The most popular route is: {}'.format(popular_route))\n\n\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n\n\n # output: total travel time\n # output: mean travel time\n\n total_travel = df['Trip Duration'].sum()\n print('The total travel time was:')\n print('...in minutes: ', total_travel/60)\n print('...in hours: ', total_travel/3600)\n print()\n mean_travel = df['Trip Duration'].mean()\n print('The mean travel time was:')\n print('...in minutes: ', mean_travel/60)\n\n\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n\n\n # output: counts of user types\n # output: counts of gender\n # output: show earliest, most recent, and most common year of birth\n\n user_types = df['User Type'].value_counts()\n print('These are our users:\\n', user_types)\n\n print()\n if 'Gender' in df.columns:\n gender = df['Gender'].value_counts()\n print('This is the gender distribution:\\n', gender)\n else:\n print('No Gender info available for DC')\n\n print()\n if 'Birth Year' in df.columns:\n oldest = df['Birth Year'].min()\n print('Our oldest user was born in: {}'.format(oldest))\n youngest = df['Birth Year'].max()\n print('Our youngest user was born in: {}'.format(youngest))\n common_year = df['Birth Year'].mode()[0]\n print('Most users were born in: {}'.format(common_year))\n else:\n print('No Birth Year info available for DC')\n\n print('-'*40)\n\n\ndef show_raw(df):\n \"\"\"Asks if interested in raw data\"\"\"\n\n lower_bound = 0\n upper_bound = 5\n\n while True:\n show_raw = input('Would you like to see 5 or more rows of raw data? Enter yes or no.\\n')\n if show_raw.lower() != 'yes':\n break\n else:\n print(df[df.columns[0:]].iloc[lower_bound:upper_bound])\n lower_bound += 5\n upper_bound += 5\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n show_raw(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"60762797","text":"\"\"\"\nYou’re about to set sail off a pier and first want to count the number\nof ships that are already in the harbor. The harbor is deemed safe to\nsail in if the number of boats in the harbor is strictly less than\nlimit. 
Given a 2D array that presents the harbor, where O represents\nwater and S represents a ship, return whether or not it’s safe for you\nto set sail.\n\nNote: All ships in the harbor can only lie entirely vertically or\nentirely horizontally and cannot be connected to another ship.\n\nEx: Given the following 2D array harbor and value limit...\n\nharbor = [\n [O, O, S],\n [S, O, O],\n [O, O, S]\n], limit = 5, return true.\nYou setting sail would cause there to be 4 ships in the harbor which is\nunder the limit of 5.\n\nEx: Given the following 2D array harbor and value limit...\n\nharbor = [\n [O, O, O],\n [S, O, S],\n [O, O, S]\n], limit = 3, return false.\nThe harbor is not safe to sail in since you setting sail would cause the\nnumber of boats in the harbor to reach the limit.\n\"\"\"\n\n\nfrom utils.grid import count_entities\n\n\ndef main() -> None:\n \"\"\"Main function\"\"\"\n harbor = [\n ['O', 'O', 'S'],\n ['S', 'O', 'O'],\n ['O', 'O', 'S']\n ]\n limit = 5\n # harbor = [\n # ['O', 'O', 'O'],\n # ['S', 'O', 'S'],\n # ['O', 'O', 'S']\n # ]\n # limit = 3\n\n count = count_entities(harbor, 'S')\n print(count+1 < limit)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"p120_setting_sail.py","file_name":"p120_setting_sail.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"203917642","text":"import time\n\nimport pandas as pd\nimport re\n\nfrom lxml import etree\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom lmf.dbv2 import db_write\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom zhulong.util.etl import add_info,est_meta,est_html,est_tbs\n\n_name_='leling'\n\n\ndef f1(driver, num):\n locator = (By.XPATH, \"(//a[@class='ewb-list-name'])[1]\")\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n # cnum=int(driver.find_element_by_xpath(\"//span[@class='pageBtnWrap']/span[@class='curr']\").text)\n # 获取当前页的url\n url = driver.current_url\n # print(url)\n if \"Paging=\" not in url:\n url = url + \"?Paging=1\"\n driver.get(url)\n cnum = 1\n else:\n cnum = int(re.findall(\"Paging=(\\d+)\", url)[0])\n if num != cnum:\n if num == 1:\n url = re.sub(\"Paging=[0-9]*\", \"Paging=1\", url)\n else:\n s = \"Paging=%d\" % (num) if num > 1 else \"Paging=1\"\n url = re.sub(\"Paging=[0-9]*\", s, url)\n # print(cnum)\n val = driver.find_element_by_xpath(\"(//a[@class='ewb-list-name'])[1]\").text\n\n driver.get(url)\n time.sleep(1)\n # print(\"1111\")\n locator = (By.XPATH, '//td[@class=\"huifont\"]')\n page_all = WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator)).text\n page = re.findall(r'(\\d+)/', page_all)[0]\n if int(page) != num:\n locator = (By.XPATH, \"(//a[@class='ewb-list-name'])[1][string()!='%s']\" % val)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n\n page = driver.page_source\n soup = BeautifulSoup(page, 'lxml')\n tb = soup.find(\"table\", cellspacing=\"3\", align=\"center\")\n trs = tb.find_all(\"tr\")\n data = []\n for li in trs[1:]:\n # print(li)\n a = li.find(\"a\")\n title = a['title']\n # print(a[\"title\"])\n link = \"http://ll.dzzyjy.gov.cn\" + a[\"href\"]\n span = 
li.find(\"font\")\n tmp = [title.strip(), span.text.strip(), link]\n data.append(tmp)\n\n # print(data)\n df = pd.DataFrame(data=data)\n df[\"info\"]=None\n return df\n\n\n\n\ndef f2(driver):\n\n locator = (By.XPATH, '//td[@class=\"huifont\"]')\n page_all = WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator)).text\n page = re.findall(r'/(\\d+)', page_all)[0]\n\n driver.quit()\n return int(page)\n\ndef f3(driver,url):\n\n\n driver.get(url)\n time.sleep(1)\n try:\n locator=(By.CLASS_NAME,\"ewb-right-info\")\n\n WebDriverWait(driver,2).until(EC.presence_of_all_elements_located(locator))\n except:\n locator=(By.XPATH,\"//div[contains(@id,'menutab')][@style='']\")\n\n WebDriverWait(driver,5).until(EC.presence_of_all_elements_located(locator))\n\n before=len(driver.page_source)\n time.sleep(0.1)\n after=len(driver.page_source)\n i=0\n while before!=after:\n before=len(driver.page_source)\n time.sleep(0.1)\n after=len(driver.page_source)\n i+=1\n if i>5:break\n\n page=driver.page_source\n\n soup=BeautifulSoup(page,'lxml')\n if 'ewb-right-info' in page:\n div=soup.find('div',class_='ewb-right-info')\n else:\n div=soup.find(\"div\",id=re.compile(\"menutab.*\"),style='')\n #div=div.find_all('div',class_='ewb-article')[0]\n \n return div\n\n\n\ndata = [\n [\"gcjs_zhaobiao_gg\",\"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004001/004001001/004001001002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"gcjs_biangeng_gg\",\"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004001/004001002/004001002002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"gcjs_zhongbiao_gg\",\"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004001/004001003/004001003002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"gcjs_yucai_gg\", \"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004001/004001005/004001005002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n\n [\"zfcg_zhaobiao_gg\", \"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004002/004002001/004002001002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"zfcg_biangeng_gg\", \"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004002/004002002/004002002002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"zfcg_zhongbiao_gg\", \"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004002/004002003/004002003002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"zfcg_yucai_gg\",\"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004002/004002005/004002005002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n [\"zfcg_hetong_gg\", \"http://ll.dzzyjy.gov.cn/TPFront_laoling/xmxx/004002/004002006/004002006002/?Paging=1\",\n [\"name\", \"ggstart_time\", \"href\",\"info\"],f1,f2],\n\n ]\n\ndef work(conp,**args):\n est_meta(conp,data=data,diqu=\"山东省乐陵市\",num=5)\n est_html(conp,f=f3,**args)\n\nif __name__=='__main__':\n work(conp=[\"postgres\",\"since2015\",\"127.0.0.1\",\"shandong\",\"leling\"])\n\n#est_tbs(conp=[\"postgres\",\"since2015\",\"127.0.0.1\",\"shandong\",\"leling\"],data=data,total=1,num=1)","sub_path":"work/zhu/shandong/leling.py","file_name":"leling.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299175269","text":"import validictory\n\nfrom flask import request\n\nfrom viaduct import application, db\nfrom viaduct.helpers import Resource\nfrom 
viaduct.helpers.api import make_api_response\n\nfrom viaduct.models import Degree\n\n\nclass DegreeAPI(Resource):\n @staticmethod\n def register():\n view = DegreeAPI.as_view('degree_api')\n\n application.add_url_rule('/api/degrees/', view_func=view,\n methods=['DELETE', 'GET', 'POST'])\n\n @staticmethod\n def get(degree_id=None):\n if degree_id:\n degree = Degree.query.get(degree_id)\n\n if not degree:\n return make_api_response(400, 'No object has been associated '\n 'with the degree ID that has been '\n 'specified.')\n\n return degree.to_dict()\n else:\n results = []\n\n for degree in Degree.query.all():\n results.append(degree.to_dict())\n\n return results\n\n @staticmethod\n def post():\n data = request.json\n schema = {'type': 'object',\n 'properties': {'name': {'type': 'string'},\n 'abbreviation': {'type': 'string'}}}\n\n try:\n validictory.validate(data, schema)\n except Exception:\n return make_api_response(400,\n 'Data does not correspond to scheme.')\n\n if Degree.query.filter(Degree.name == data['name']).count() > 0:\n return make_api_response(400, 'There is already an object with '\n 'the name that has been specified.')\n\n degree = Degree(data['name'], data['abbreviation'])\n db.session.add(degree)\n db.session.commit()\n\n return degree.to_dict(), '201 The object has been created.'\n\n @staticmethod\n def delete(degree_id=None):\n if degree_id:\n degree = Degree.query.get(degree_id)\n\n if not degree:\n return make_api_response(400, 'No object has been associated '\n 'with the degree ID that has been '\n 'specified')\n\n db.session.delete(degree)\n db.session.commit()\n\n return make_api_response(204, 'The object has been deleted')\n else:\n return make_api_response(400, 'TODO')\n","sub_path":"viaduct/api/degree.py","file_name":"degree.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"487225116","text":"import os\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash, jsonify\n\nfrom engine_files.input import Input\nfrom engine_files.condition import Condition\n\n\n# create our little application :)\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n\n@app.route('/hello')\ndef hello():\n return 'Hello, World Xiaozhou'\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n data = {'request_form': str(request.form)}\n return jsonify(data)\n else:\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"371580026","text":"\"\"\" Adapted from the original implementation. 
\"\"\"\n\nimport collections\nimport dataclasses\nfrom typing import List\n\nimport torch\n\n\n@dataclasses.dataclass\nclass VoVNetParams:\n stem_out: int\n stage_conv_ch: List[int] # Channel depth of\n stage_out_ch: List[int] # The channel depth of the concatenated output\n layer_per_block: int\n block_per_stage: List[int]\n dw: bool\n\n\n_STAGE_SPECS = {\n \"vovnet-19-slim-dw\": VoVNetParams(\n 64, [64, 80, 96, 112], [112, 256, 384, 512], 3, [1, 1, 1, 1], True\n ),\n \"vovnet-19-dw\": VoVNetParams(\n 64, [128, 160, 192, 224], [256, 512, 768, 1024], 3, [1, 1, 1, 1], True\n ),\n \"vovnet-19-slim\": VoVNetParams(\n 128, [64, 80, 96, 112], [112, 256, 384, 512], 3, [1, 1, 1, 1], False\n ),\n \"vovnet-19\": VoVNetParams(\n 128, [128, 160, 192, 224], [256, 512, 768, 1024], 3, [1, 1, 1, 1], False\n ),\n \"vovnet-39\": VoVNetParams(\n 128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 1, 2, 2], False\n ),\n \"vovnet-57\": VoVNetParams(\n 128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 1, 4, 3], False\n ),\n \"vovnet-99\": VoVNetParams(\n 128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 3, 9, 3], False\n ),\n}\n\n_BN_MOMENTUM = 1e-1\n_BN_EPS = 1e-5\n\n\ndef dw_conv(\n in_channels: int, out_channels: int, stride: int = 1\n) -> List[torch.nn.Module]:\n \"\"\" Depthwise separable pointwise linear convolution. \"\"\"\n return [\n torch.nn.Conv2d(\n in_channels,\n in_channels,\n kernel_size=3,\n padding=1,\n stride=stride,\n groups=in_channels,\n bias=False,\n ),\n torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),\n torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM),\n torch.nn.ReLU(inplace=True),\n ]\n\n\ndef conv(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n groups: int = 1,\n kernel_size: int = 3,\n padding: int = 1,\n) -> List[torch.nn.Module]:\n \"\"\" 3x3 convolution with padding.\"\"\"\n return [\n torch.nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=False,\n ),\n torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM),\n torch.nn.ReLU(inplace=True),\n ]\n\n\ndef pointwise(in_channels: int, out_channels: int) -> List[torch.nn.Module]:\n \"\"\" Pointwise convolution.\"\"\"\n return [\n torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),\n torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM),\n torch.nn.ReLU(inplace=True),\n ]\n\n\nclass ESE(torch.nn.Module):\n \"\"\"This is adapted from the efficientnet Squeeze Excitation. The idea is to not\n squeeze the number of channels to keep more information.\"\"\"\n\n def __init__(self, channel: int) -> None:\n super().__init__()\n self.avg_pool = torch.nn.AdaptiveAvgPool2d(1)\n self.fc = torch.nn.Conv2d(channel, channel, kernel_size=1) # (Linear)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n out = self.avg_pool(x)\n out = self.fc(out)\n return torch.sigmoid(out) * x\n\n\nclass OSA(torch.nn.Module):\n def __init__(\n self,\n in_channels: int,\n stage_channels: int,\n concat_channels: int,\n layer_per_block: int,\n use_depthwise: bool = False,\n ) -> None:\n \"\"\" Implementation of an OSA layer which takes the output of its conv layers and \n concatenates them into one large tensor which is passed to the next layer. The\n goal with this concatenation is to preserve information flow through the model\n layers. This also ends up helping with small object detection. 
\n \n Args:\n in_channels: Channel depth of the input to the OSA block.\n stage_channels: Channel depth to reduce the input.\n concat_channels: Channel depth to force on the concatenated output of the\n comprising layers in a block.\n layer_per_block: The number of layers in this OSA block.\n use_depthwise: Wether to use depthwise separable pointwise linear convs.\n \"\"\"\n super().__init__()\n # Keep track of the size of the final concatenation tensor.\n aggregated = in_channels\n self.isReduced = in_channels != stage_channels\n\n # If this OSA block is not the first in the OSA stage, we can\n # leverage the fact that subsequent OSA blocks have the same input and\n # output channel depth, concat_channels. This lets us reuse the concept of\n # a residual from ResNet models.\n self.identity = in_channels == concat_channels\n self.layers = torch.nn.ModuleList()\n self.use_depthwise = use_depthwise\n conv_op = dw_conv if use_depthwise else conv\n\n # If this model uses depthwise and the input channel depth needs to be reduced\n # to the stage_channels size, add a pointwise layer to adjust the depth. If the\n # model is not depthwise, let the first OSA layer do the resizing.\n if self.use_depthwise and self.isReduced:\n self.conv_reduction = torch.nn.Sequential(\n *pointwise(in_channels, stage_channels)\n )\n in_channels = stage_channels\n\n for _ in range(layer_per_block):\n self.layers.append(\n torch.nn.Sequential(*conv_op(in_channels, stage_channels))\n )\n in_channels = stage_channels\n\n # feature aggregation\n aggregated += layer_per_block * stage_channels\n self.concat = torch.nn.Sequential(*pointwise(aggregated, concat_channels))\n self.ese = ESE(concat_channels)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n\n if self.identity:\n identity_feat = x\n\n output = [x]\n if self.use_depthwise and self.isReduced:\n x = self.conv_reduction(x)\n\n # Loop through all the\n for layer in self.layers:\n x = layer(x)\n output.append(x)\n\n x = torch.cat(output, dim=1)\n xt = self.concat(x)\n xt = self.ese(xt)\n\n if self.identity:\n xt += identity_feat\n\n return xt\n\n\nclass OSA_stage(torch.nn.Sequential):\n def __init__(\n self,\n in_channels: int,\n stage_channels: int,\n concat_channels: int,\n block_per_stage: int,\n layer_per_block: int,\n stage_num: int,\n use_depthwise: bool = False,\n ) -> None:\n \"\"\"An OSA stage which is comprised of OSA blocks.\n Args:\n in_channels: Channel depth of the input to the OSA stage.\n stage_channels: Channel depth to reduce the input of the block to.\n concat_channels: Channel depth to force on the concatenated output of the\n comprising layers in a block.\n block_per_stage: Number of OSA blocks in this stage.\n layer_per_block: The number of layers per OSA block.\n stage_num: The OSA stage index.\n use_depthwise: Wether to use depthwise separable pointwise linear convs.\n \"\"\"\n super().__init__()\n\n # Use maxpool to downsample the input to this OSA stage.\n self.add_module(\n \"Pooling\", torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n )\n\n for idx in range(block_per_stage):\n # Add the OSA modules. 
If this is the first block in the stage, use the\n            # proper in channels, but the rest of the OSA layers will use\n            # the concatenation channel depth outputted from the previous layer.\n            self.add_module(\n                f\"OSA{stage_num}_{idx + 1}\",\n                OSA(\n                    in_channels if idx == 0 else concat_channels,\n                    stage_channels,\n                    concat_channels,\n                    layer_per_block,\n                    use_depthwise=use_depthwise,\n                ),\n            )\n\n\nclass VoVNet(torch.nn.Sequential):\n    def __init__(\n        self, model_name: str, num_classes: int = 10, input_channels: int = 3\n    ) -> None:\n        \"\"\"\n        Args:\n            model_name: Which model to create.\n            num_classes: The number of classification classes.\n            input_channels: The number of input channels.\n\n        Usage:\n            >>> net = VoVNet(\"vovnet-19-slim-dw\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-19-dw\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-19-slim\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-19\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-39\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-57\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n\n            >>> net = VoVNet(\"vovnet-99\", num_classes=1000)\n            >>> with torch.no_grad():\n            ...     out = net(torch.randn(1, 3, 512, 512))\n            >>> print(out.shape)\n            torch.Size([1, 1000])\n        \"\"\"\n        super().__init__()\n        assert model_name in _STAGE_SPECS, f\"{model_name} not supported.\"\n\n        stem_ch = _STAGE_SPECS[model_name].stem_out\n        config_stage_ch = _STAGE_SPECS[model_name].stage_conv_ch\n        config_concat_ch = _STAGE_SPECS[model_name].stage_out_ch\n        block_per_stage = _STAGE_SPECS[model_name].block_per_stage\n        layer_per_block = _STAGE_SPECS[model_name].layer_per_block\n        conv_type = dw_conv if _STAGE_SPECS[model_name].dw else conv\n\n        # Construct the stem.\n        stem = conv(input_channels, 64, stride=2)\n        stem += conv_type(64, 64)\n\n        # The original implementation uses a stride=2 on the conv below, but in this\n        # implementation we'll just pool at every OSA stage, unlike the original\n        # which doesn't pool at the first OSA stage.\n        stem += conv_type(64, stem_ch)\n        self.model = torch.nn.Sequential()\n        self.model.add_module(\"stem\", torch.nn.Sequential(*stem))\n        self._out_feature_channels = [stem_ch]\n\n        # Organize the outputs of each OSA stage. This is the concatenated channel\n        # depth of each sub block's layer's outputs.\n        in_ch_list = [stem_ch] + config_concat_ch[:-1]\n\n        # Add the OSA modules. Typically 4 modules.
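        # As a concrete sketch (values read straight from _STAGE_SPECS above, nothing new):\n        # \"vovnet-19-slim\" has stem_ch=128 and config_concat_ch=[112, 256, 384, 512], so the\n        # stages below are wired 128 -> OSA_2 -> 112 -> OSA_3 -> 256 -> OSA_4 -> 384 -> OSA_5 -> 512,\n        # each stage first reducing to its config_stage_ch ([64, 80, 96, 112]) inside the blocks.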
        for idx in range(len(config_stage_ch)):\n            self.model.add_module(\n                f\"OSA_{(idx + 2)}\",\n                OSA_stage(\n                    in_ch_list[idx],\n                    config_stage_ch[idx],\n                    config_concat_ch[idx],\n                    block_per_stage[idx],\n                    layer_per_block,\n                    idx + 2,\n                    _STAGE_SPECS[model_name].dw,\n                ),\n            )\n\n            self._out_feature_channels.append(config_concat_ch[idx])\n\n        # Add the classification head.\n        self.model.add_module(\n            \"classifier\",\n            torch.nn.Sequential(\n                # BatchNorm2d's positional signature is (num_features, eps, momentum),\n                # so eps and momentum are passed as keywords to keep them from being swapped.\n                torch.nn.BatchNorm2d(\n                    self._out_feature_channels[-1], eps=_BN_EPS, momentum=_BN_MOMENTUM\n                ),\n                torch.nn.AdaptiveAvgPool2d(1),\n                torch.nn.Flatten(),\n                torch.nn.Dropout(0.2),\n                torch.nn.Linear(self._out_feature_channels[-1], num_classes, bias=True),\n            ),\n        )\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return self.model(x)\n\n    def forward_pyramids(self, x: torch.Tensor) -> collections.OrderedDict:\n        \"\"\"\n        Return the output of the stem and of each OSA stage as an ordered dict of\n        pyramid levels.\n        Usage:\n            >>> net = VoVNet(\"vovnet-19-slim-dw\", num_classes=1000)\n            >>> net.delete_classification_head()\n            >>> with torch.no_grad():\n            ...     out = net.forward_pyramids(torch.randn(1, 3, 512, 512))\n            >>> [level.shape[-1] for level in out.values()]  # Check the height/widths of levels\n            [256, 128, 64, 32, 16]\n            >>> [level.shape[1] for level in out.values()] == net._out_feature_channels\n            True\n        \"\"\"\n        levels = collections.OrderedDict()\n        levels[1] = self.model.stem(x)\n        levels[2] = self.model.OSA_2(levels[1])\n        levels[3] = self.model.OSA_3(levels[2])\n        levels[4] = self.model.OSA_4(levels[3])\n        levels[5] = self.model.OSA_5(levels[4])\n        return levels\n\n    def delete_classification_head(self) -> None:\n        \"\"\" Call this before using model as an object detection backbone. \"\"\"\n        del self.model.classifier\n\n    def get_pyramid_channels(self) -> List[int]:\n        \"\"\" Return the number of channels for each pyramid level.
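        Usage (a sketch in the style of the other doctests; the exact list depends on the chosen model):\n            >>> net = VoVNet(\"vovnet-19-slim\", num_classes=1000)\n            >>> net.get_pyramid_channels()\n            [128, 112, 256, 384, 512]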
\"\"\"\n return self._out_feature_channels\n","sub_path":"vovnet.py","file_name":"vovnet.py","file_ext":"py","file_size_in_byte":13698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"497416867","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mainapp', '0005_auto_20170110_0653'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Manuscript',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('language', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='MSCollection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Page',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('status', models.CharField(max_length=50)),\n ('manuscript', models.ForeignKey(to='mainapp.Manuscript')),\n ],\n ),\n migrations.AddField(\n model_name='manuscript',\n name='collection',\n field=models.ForeignKey(to='mainapp.MSCollection'),\n ),\n ]\n","sub_path":"web_annotator_django/mainapp/_migrations/0006_auto_20170110_1443.py","file_name":"0006_auto_20170110_1443.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"427235683","text":"import sys\nfrom functools import wraps\nfrom shutil import copyfile\n\nimport click\nfrom click import make_pass_decorator, option\n\nfrom esque.cli import environment\nfrom esque.cli.helpers import ensure_approval\nfrom esque.cluster import Cluster\nfrom esque.config import Config, config_dir, config_path, sample_config_path\nfrom esque.errors import ConfigNotExistsException\n\n\nclass State(object):\n def __init__(self):\n self.no_verify = False\n self._verbose = False\n self._cluster = None\n self._config = None\n\n @property\n def config(self) -> Config:\n if self._config is None:\n self._create_config()\n return self._config\n\n def _create_config(self):\n try:\n self._config = Config.get_instance()\n except ConfigNotExistsException:\n click.echo(f\"No config provided in {config_dir()}\")\n if ensure_approval(f\"Should a sample file be created in {config_dir()}\"):\n config_dir().mkdir(exist_ok=True)\n copyfile(sample_config_path().as_posix(), config_path())\n else:\n raise\n if ensure_approval(\"Do you want to modify the config file now?\"):\n click.edit(filename=config_path().as_posix())\n self._config = Config.get_instance()\n\n @property\n def cluster(self):\n if not self._cluster:\n self._cluster = Cluster()\n return self._cluster\n\n def _get_verbose(self) -> bool:\n if environment.ESQUE_VERBOSE is not None:\n return True\n return self._verbose\n\n def _set_verbose(self, verbose):\n self._verbose = verbose\n\n verbose = property(_get_verbose, _set_verbose)\n\n\npass_state = make_pass_decorator(State, ensure=True)\n\n\ndef verbose_callback(context, _: str, verbose=False):\n state = context.ensure_object(State)\n state.verbose = verbose\n\n\nverbose_option = click.option(\n \"-v\",\n \"--verbose\",\n is_flag=True,\n is_eager=True,\n callback=verbose_callback,\n expose_value=False,\n 
help=\"Return stack trace on error.\",\n)\n\n\ndef default_options(f):\n defaults = [no_verify_option, verbose_option, error_handler, pass_state]\n for decorator in defaults:\n f = decorator(f)\n return f\n\n\ndef no_verify_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n state.no_verify = value\n\n return option(\n \"--no-verify\",\n type=bool,\n help=\"Skip all verification dialogs and answer them with yes.\",\n required=False,\n is_flag=True,\n expose_value=False,\n default=False,\n callback=callback,\n )(f)\n\n\noutput_format_option = click.option(\n \"-o\",\n \"--output-format\",\n type=click.Choice([\"yaml\", \"json\"], case_sensitive=False),\n help=\"Format of the output.\",\n required=False,\n)\n\n\ndef error_handler(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n state = args[0]\n if not isinstance(state, State):\n raise TypeError(\n \"First argument is not a state, make sure that the `error_handler` decorator comes below `pass_state`\"\n )\n try:\n f(*args, **kwargs)\n except Exception as e:\n if state.verbose:\n raise\n _silence_exception(e)\n\n return wrapper\n\n\ndef _silence_exception(e: Exception):\n if hasattr(e, \"format_message\"):\n click.echo(e.format_message())\n elif isinstance(e, (KeyError, ValueError)):\n click.echo(f\"{type(e).__name__}: {str(e)}\")\n else:\n click.echo(f\"Exception of type {type(e).__name__} occurred: {e}\")\n click.echo(\"Run with `--verbose` for complete error.\")\n sys.exit(1)\n","sub_path":"esque/cli/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"573084349","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\"\"\"\n This script has to be executed after hi_freq_data_to_csv.py and get_interval.py have succesfully run.\n This script should be called with 1 (or 2) arguments.\n The 1st mandatory argument is the ABSOLUTE path of the top directory for the flight campaign.\n\n /media/spectors/HDD320/lidar/20201218_fresh <<----- This is it!\n ----------------------------/20201218_fresh/p_00_joined_pcap_files\n ----------------------------/20201218_fresh/p_01_apx_csv_shapefile <<----- This must be present and will be used as input.\n ----------------------------/20201218_fresh/p_02_plt <<----- Not used. Just for reference.\n ----------------------------/20201218_fresh/p_03_pcap <<----- This must be present and will be used as input.\n ----------------------------/20201218_fresh/2_planned_mision\n ----------------------------/20201218_fresh/ .....\n ----------------------------/20201218_fresh/logging <<----- This is where the logs will be stored.\n ----------------------------/20201218_fresh/transl_table.txt <<----- This must be present and will be used as input.\n\n The 2nd optional argument can be a boresight-calibration string.\n It must contain the boresight angles and be of the following form:\n # RabcdefghPijklmnopYqrstuvwx\n # Where abcdefgh is milionths of degree to ROLL. a is sign (p/n)\n # ..... ijklmnop is milionths of degree to PITCH. i is sign (p/n)\n # ..... qrstuvwx is milionths of degree to YAW. q is sign (p/n)\n # In this order! 
\n    \n    This script combines .csv files with each of the .pcap flight lines and writes point clouds in .txt files.\n    It then calls a few LAStools commands to convert them to las, denoise and set the correct (georeference) metadata.\n    The script is run non-interactively.\n    The only exception is choosing the p_01_apx_csv_shapefile and p_03_pcap folders at the beginning if there are multiple of them.\n    TO DO: add support for different EPSG codes.\n\"\"\"\n\n\nimport time\nimport os\nimport sys\nimport datetime\nimport platform\nimport logging\nimport shutil\nimport re\nfrom collections import OrderedDict\nfrom multiprocessing import Pool, cpu_count\nfrom multiprocessing.managers import SharedMemoryManager\nfrom multiprocessing.shared_memory import SharedMemory\nfrom scipy.interpolate import interp1d\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scapy.all import rdpcap\n#from vlp16_tables import *\nimport vlp16_tables\n\n\nlog_dir = 'p_logging'\ntxt_dir_in = 'p_01_apx_csv_shapefile'\ntxt_in_base_len = len(txt_dir_in)\npcap_dir_in = 'p_03_pcap'\npcap_in_base_len = len(pcap_dir_in)\nout_dir_ascii = 'p_04_ascii'\nout_ascii_base_len = len(out_dir_ascii)\nout_dir_las = 'p_05_las'\nout_las_base_len = len(out_dir_las)\ntransl_table_fn = 'p_transl_table.txt'\nfn_keyword = 'hi_freq_apx'\nnl = '\\n'\n\ndef shorten_string(text_string):\n    \"\"\"\n    Function to remove all duplicates from a string\n    while keeping the order of the characters the same.\n    https://www.geeksforgeeks.org/remove-duplicates-given-string-python/\n    \"\"\"\n    return \"\".join(OrderedDict.fromkeys(text_string))\n\ndef remove_min_sec(ts):\n    return (int(ts) // 3600) * 3600\n\n# ### Function to calculate the gaps between given azimuths. 
Needed to interpolate azimuths that are not given.\n\ndef get_azim_gap(azimuths, dual=True, preserve_shape=False):\n \"\"\"\n Only works for dual returns now.\n \n preserve_shape is relevant for dual, where the azimuths repeat.\n if False: return only unique gaps.\n if True: return same shape as azimuths\n \"\"\"\n if dual:\n azimuths_gap_flat = np.zeros_like(azimuths[:,0::2]).flatten()\n azimuths_gap_flat[:-1] = ((azimuths[:,0::2].flatten()[1:] -\\\n azimuths[:,0::2].flatten()[:-1]) % 36000)\n azimuths_gap_flat[-1] = azimuths_gap_flat[-2]\n azimuths_gap = azimuths_gap_flat.reshape(azimuths[:,0::2].shape)\n if preserve_shape:\n azimuths_gap = np.tile(azimuths_gap,2)\n return azimuths_gap\n else:\n raise NotImplementedError\n\ndef get_micros_pulses(micros, dual=True, preserve_shape=False):\n \"\"\"\n preserve_shape is relevant for dual, where the azimuths repeat.\n if False: return only unique gaps.\n if True: return same shape as azimuths\n \"\"\"\n if dual:\n if preserve_shape:\n micros_pulses = np.expand_dims(micros, axis=1) +\\\n vlp16_tables.TIMING_OFFSETS_DUAL.T.flatten() * 1e6\n else:\n micros_pulses = np.expand_dims(micros, axis=1) +\\\n vlp16_tables.TIMING_OFFSETS_DUAL.T[0::2,:].flatten() * 1e6\n else:\n micros_pulses = np.expand_dims(micros, axis=1) +\\\n vlp16_tables.TIMING_OFFSETS_SINGLE.T.flatten() * 1e6\n return micros_pulses\n\ndef get_precision_azimuth(az_simple, azimuths_gap, dual=True, minimal_shape=True):\n if dual:\n timing_offsets_within_block = vlp16_tables.TIMING_OFFSETS_DUAL[:,0]\n az_pulses = np.tile(az_simple,(vlp16_tables.LASERS_PER_DATA_BLOCK)).reshape(\\\n az_simple.shape[0], vlp16_tables.LASERS_PER_DATA_BLOCK, az_simple.shape[1])\n az_pulses = az_pulses.transpose((0,2,1))\n precision_azimuth = az_pulses[:,:,:] +\\\n timing_offsets_within_block / (2 * vlp16_tables.T_CYCLE) *\\\n np.expand_dims(azimuths_gap, axis=2)\n precision_azimuth = precision_azimuth % 36000\n if not minimal_shape:\n precision_azimuth = np.tile(\\\n precision_azimuth.transpose((0,2,1)), (1,2,1)).transpose((0,2,1))\n precision_azimuth = precision_azimuth.reshape(\\\n (precision_azimuth.shape[0], precision_azimuth.shape[1] * precision_azimuth.shape[2]))\n return precision_azimuth\n else:\n raise NotImplementedError\n\ndef process_file(pcap_file_in, pcap_dir_in,\n out_dir_ascii, out_dir_las,\n shm_name, shm_shp, shm_dtp,\n b_roll, b_pitch, b_yaw,\n concat_cmd, wine_cmd):\n print(f\"Processing {pcap_file_in}\")\n logging.info(f\"Processing {pcap_file_in}\")\n\n loc_shm = SharedMemory(shm_name)\n loc_apx_arr = np.recarray(shape=shm_shp, dtype=shm_dtp, buf=loc_shm.buf)\n\n ### Temporary plug-in here.\n # This is not a proper solution, just a quick proof-of-concept\n # Before hand must manually copy the file yaw_correction.csv into the appropriate folder\n if 'yaw_correction.csv' in os.listdir(pcap_dir_in):\n yaw_agisoft = pd.read_csv(os.path.join(pcap_dir_in, 'yaw_correction.csv'), index_col=0)\n else:\n # just have a dataframe that when interpolated will result in 0 everywhere\n idx = pd.Index([0, 1, 2597835528, 2597835529], name='utc_time')\n yaw_agisoft = pd.DataFrame(data = np.array([[0],[0],[0],[0]]),\n columns = ['smooth_yaw_err'],\n index = idx)\n\n\n # ### Read entire file only once (takes most time)\n\n start = time.time()\n packets = rdpcap(os.path.join(pcap_dir_in, pcap_file_in))\n packets_read = len(packets)\n end = time.time()\n print(F\"{pcap_file_in}: Read {packets_read} packets in {end-start:.2f} seconds.\")\n logging.info(F\"{pcap_file_in}: Read {packets_read} packets in 
{end-start:.2f} seconds.\")\n\n\n # ### Make sure all packets have length == 1206!\n start = time.time()\n wrong_lengths = 0\n for p in packets:\n if len(p.load) != vlp16_tables.DATA_PACKET_LENGTH:\n wrong_lengths += 1\n end = time.time()\n logging.info(F\"{pcap_file_in}: Checked {packets_read} packets in {end-start:.2f} seconds.\")\n logging.info('All have same length ('+str(vlp16_tables.DATA_PACKET_LENGTH)+').' \\\n if wrong_lengths==0 else str(wrong_lengths)+' packets have a different length.')\n logging.info('This is GOOD!' if wrong_lengths==0 else 'This is BAD!')\n\n\n # ### Read all packets into 1 numpy array\n start = time.time()\n raw_pack_data = np.zeros((packets_read, vlp16_tables.DATA_PACKET_LENGTH), dtype = np.uint8)\n for i,p in enumerate(packets):\n raw_pack_data[i,:] = np.frombuffer(p.load, dtype = np.uint8)\n if i % 1e5 == 0:\n print(f\"{pcap_file_in}: Packet {i} out of {packets_read} in {time.time()-start:.2f} seconds.\")\n end = time.time()\n logging.info(F\"{pcap_file_in}: Copied data from {packets_read} packets into a numpy array of shape {raw_pack_data.shape} in {end-start:.2f} seconds.\")\n\n\n # ### Make sure all packets are captured in the same mode (last, strongest, dual)\n mode_hypothesis = raw_pack_data[0, vlp16_tables.RETURN_MODE_OFFSET]\n logging.info(f\"First packet reports {vlp16_tables.RETURN_MODE_NAME[mode_hypothesis]} capture mode.\")\n diff_ret_mode = (raw_pack_data[:, vlp16_tables.RETURN_MODE_OFFSET] != mode_hypothesis).sum()\n logging.info(f\"{diff_ret_mode} packets disagree.\")\n logging.info(f\"{'This is GOOD!' if diff_ret_mode == 0 else 'This is BAD!'}\")\n\n\n # ### Make sure all packets are captured with the same sensor (only VLP16 expected)\n sensor_hypothesis = raw_pack_data[0, vlp16_tables.PRODUCT_MODEL_OFFSET]\n logging.info(f\"{pcap_file_in}: First packet reports {vlp16_tables.PRODUCT_MODEL_NAME[sensor_hypothesis]} sensor model.\")\n diff_sensor = (raw_pack_data[:, vlp16_tables.PRODUCT_MODEL_OFFSET] != sensor_hypothesis).sum()\n logging.info(f\"{pcap_file_in}: {diff_sensor} packets disagree.\")\n logging.info(f\"{pcap_file_in}: {'This is GOOD!' 
if diff_sensor == 0 else 'This is BAD!'}\")\n\n\n # ### Get µs timestamp from packets and transform to UNIX timestamp\n # \n # I found that Ethernet timestamp agrees with GNSS timestamp very well.\n # \n # Can be problematic if very close ho full hour and I am not careful.\n # \n # Let's look at 1st Ethernet timestamp.\n # \n # * if it is far enough from a full hour (>=1 minute), then we continue\n # * ~if it is too close (<1 minute), then we look at last one~ _not implemented_\n # * ~if last one is also too close (recorded for 1 entire hour, not likely),\n # we find an optimal one in the middle~ _not implemented_\n\n\n\n ts_1st_pack = datetime.datetime.fromtimestamp(int(packets[0].time))\n if ts_1st_pack.minute > 1 and ts_1st_pack.minute < 59:\n logging.info(f\"{pcap_file_in}: Far enough from full hour (~{ts_1st_pack.minute} minutes).\")\n logging.info(\"This is GOOD!\\nContinue!\")\n else:\n logging.info(f\"{pcap_file_in}: Too close to full hour (~{ts_1st_pack.minute} minutes).\")\n logging.info(\"That is not great, but the code below should deal with it.\")\n\n\n # #### Take Ethernet timestamp of (1st) packet,\n # discard sub-hour info and add replace it with that from GNSS µs timestamp\n # \n # What happens when the capture rolls over a full hour?\n # \n # **Need to deal with this when such data is captured!**\n # \n # # Solution below!\n\n start = time.time()\n micros = np.zeros((packets_read,), dtype = np.int64)\n micro_bytes = micros.view(dtype = np.uint8)\n micro_bytes[0::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 0]\n micro_bytes[1::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 1]\n micro_bytes[2::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 2]\n micro_bytes[3::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 3]\n plt.plot(micros)\n end = time.time()\n logging.info(f\"{pcap_file_in}: Extracted time stamp from {packets_read} packets in {end-start:.2f} seconds.\")\n logging.info(f\"{pcap_file_in}: If the line jumps, a full hour occurs. Need to deal with it!\")\n\n\n # #### Another problem could be that the UDP packets are not guaranteed to arrive in order.\n # \n # An assumption that is made for the following calculations is that this does not happen.\n # \n # **Need to deal with this when such data is captured!**\n\n\n\n while (micros[1:] < micros[:-1]).sum() > 0:\n jump_position = np.where((micros[1:] < micros[:-1]))[0][0] + 1\n micros[jump_position:] += int(3.6e9)\n logging.info(f\"{pcap_file_in}: Added another hour to micros at position {jump_position}\")\n plt.plot(micros)\n\n if (micros[1:] - micros[:-1]).min() > 0:#all chronological\n logging.info(f\"{pcap_file_in}: Packets seem to be in right order. Continue!\")\n else:\n logging.info(f\"{pcap_file_in}: Not all packets are in order. Handle somehow!\")\n print(f\"{pcap_file_in}: Not all packets are in order. Handle somehow!\")\n sys.exit(0)\n\n\n eth_ts_hour = remove_min_sec(packets[0].time)\n\n puck_timestamps = micros / 1e6 + eth_ts_hour * 1.0\n\n\n # ### Get range and intensity info for all packets\n start = time.time()\n\n # the following contains only channel data (i.e. 
no timestamp, factory bytes or azimuth)\n channel_data = raw_pack_data[:,:-6].reshape(\\\n (packets_read, vlp16_tables.DATA_BLOCKS, 100))[:,:,4:]\n channel_data = channel_data.reshape(\\\n (packets_read, vlp16_tables.DATA_BLOCKS * vlp16_tables.LASERS_PER_DATA_BLOCK * 3))\n\n #puck ranges in mm\n puck_ranges = np.zeros(\\\n (packets_read,\\\n vlp16_tables.DATA_BLOCKS * vlp16_tables.LASERS_PER_DATA_BLOCK),\\\n dtype = np.uint32)\n puck_range_bytes = puck_ranges.view(dtype = np.uint8)\n puck_range_bytes[:,0::4] = channel_data[:,0::3]\n puck_range_bytes[:,1::4] = channel_data[:,1::3]\n puck_ranges *= 2\n\n #intensities as 1 byte\n puck_intens = np.zeros(\\\n (packets_read,\\\n vlp16_tables.DATA_BLOCKS * vlp16_tables.LASERS_PER_DATA_BLOCK),\\\n dtype = np.uint8)\n puck_intens[:,:] = channel_data[:,2::3]\n\n end = time.time()\n\n logging.info(f\"{pcap_file_in}: Extracted range and intensity for {packets_read * vlp16_tables.DATA_BLOCKS * vlp16_tables.LASERS_PER_DATA_BLOCK} laser pulses in {end-start:.2f} seconds.\")\n\n\n # ### Get all given azimuths\n # \n # Think how to treat them for dual / single cases later.\n # \n # For now assume it is always DUAL!\n\n\n # Changed azimuths data type to signed 32-bit integer to support in-place substraction\n start = time.time()\n\n # the following contains only azimuth data (i.e. no timestamp, factory bytes or channel data)\n azimuths = np.zeros((packets_read, vlp16_tables.DATA_BLOCKS, 1), dtype = np.int32)\n azim_data = azimuths.view(dtype = np.uint8)\n azim_data[:,:,0:2] = raw_pack_data[:, :-6].reshape(\\\n packets_read, vlp16_tables.DATA_BLOCKS, 100)[:, :, 2:4]\n azim_data = azim_data.reshape((packets_read, vlp16_tables.DATA_BLOCKS * 4))\n\n #azimuth\n azimuths = azim_data.view(dtype= np.int32)\n\n end = time.time()\n\n logging.info(f\"{pcap_file_in}: Extracted azimuths for {packets_read * vlp16_tables.DATA_BLOCKS} firing sequences in {end-start:.2f} seconds.\")\n\n\n # ### All packets are in dual return mode, so the azimuths are expected to repeat (VLP-16 User Manual, Figure 9-3)\n # \n # The following checks this assumption again:\n\n az_repeat = ((azimuths[:, 0::2] != azimuths[:, 1::2]).sum() == 0)\n if az_repeat:\n logging.info(f\"{pcap_file_in}: All azimuths repeat. This is good.\")\n else:\n logging.info(f\"{pcap_file_in}: Not all azimuths repeat. 
Investigate before continuing.\")\n\n\n azimuths_gap = get_azim_gap(azimuths)\n\n micros_pulses = get_micros_pulses(micros)\n # timestamp for each laser pulse\n puck_pulse_time = micros_pulses / 1e6 + eth_ts_hour * 1.0\n\n\n # ### Calculate the azimutzhs for each datapoint\n\n #Use the following simplified array if in dual mode\n #Otherwise can still refer to it, but it's just the original array\n if mode_hypothesis == vlp16_tables.RETURN_MODE_DUAL:\n az_simple = azimuths[:,0::2]\n else:\n az_simple = azimuths\n\n prec_az = get_precision_azimuth(az_simple, azimuths_gap, True, True)\n\n\n # ### Get the APX data\n\n #cut the big dataframe to only what the puck data covers\n interv = np.where(\\\n np.logical_and(\\\n loc_apx_arr.timestamp > puck_timestamps[0],\n loc_apx_arr.timestamp < puck_timestamps[-1]))[0]\n mid_apx_arr = loc_apx_arr[max(interv[0] - 1, 0):min(interv[-1] + 1, loc_apx_arr.shape[0])]\n\n # ### process puck data...\n concat_files = []\n MAXIMUM_POINTS_PER_RUN = 20000# * iterate over puck_timestamps\n max_laps = int(np.ceil(puck_timestamps.size / MAXIMUM_POINTS_PER_RUN))\n for run_count in range(max_laps):\n print(f'{pcap_file_in}: Running slice {run_count} out of {max_laps}')\n current_range = np.arange(\\\n 0, min(MAXIMUM_POINTS_PER_RUN, puck_timestamps.size -\\\n MAXIMUM_POINTS_PER_RUN * run_count)) +\\\n MAXIMUM_POINTS_PER_RUN * run_count #a slice that hopefully fits in RAM\n \n #time in seconds\n min_time = puck_timestamps[current_range][0]\n max_time = puck_timestamps[current_range][-1]\n\n print(f\"{pcap_file_in}: Processing {(max_time - min_time):.2f} seconds\")\n\n interv = np.where(\\\n np.logical_and(\\\n mid_apx_arr.timestamp > min_time,\n mid_apx_arr.timestamp < max_time))[0]\n sml_apx_arr = mid_apx_arr[max(interv[0] - 1, 0):min(interv[-1] + 2, mid_apx_arr.shape[0])]\n relevant_times = puck_pulse_time[current_range,:]\n\n strongest_return_ranges = puck_ranges[current_range].reshape(\\\n (-1, vlp16_tables.DATA_BLOCKS, vlp16_tables.LASERS_PER_DATA_BLOCK))\\\n [:,1::2].flatten() / 1000\n strongest_return_intensities = puck_intens[current_range].reshape(\\\n (-1, vlp16_tables.DATA_BLOCKS, vlp16_tables.LASERS_PER_DATA_BLOCK))\\\n [:,1::2].flatten()\n\n last_return_ranges = puck_ranges[current_range].reshape(\\\n (-1, vlp16_tables.DATA_BLOCKS, vlp16_tables.LASERS_PER_DATA_BLOCK))\\\n [:,0::2].flatten() / 1000\n last_return_intensities = puck_intens[current_range].reshape(\\\n (-1, vlp16_tables.DATA_BLOCKS, vlp16_tables.LASERS_PER_DATA_BLOCK))\\\n [:,0::2].flatten()\n\n azimuth = prec_az[current_range]\n\n vert_elev_angle = np.tile(vlp16_tables.elevation_and_vert_corr_by_laser_id[:,0], (1,12))\n vert_elev_angle = np.tile(vert_elev_angle, (azimuth.shape[0],1))\n\n global_laser_id = np.tile(np.arange(16, dtype = np.uint8), (1,12))\n global_laser_id = np.tile(global_laser_id, (azimuth.shape[0],1))\n\n azimuth = np.deg2rad(azimuth / 100).flatten()\n vert_elev_angle = vert_elev_angle.flatten()\n\n f_lat = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"lat_EPSG32632\"],\n kind='cubic', fill_value=\"extrapolate\")\n f_lon = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"lon_EPSG32632\"],\n kind='cubic', fill_value=\"extrapolate\")\n f_ele = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"elevation\"],\n kind='cubic', fill_value=\"extrapolate\")\n f_yaw = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"heading_continuous\"],\n kind='cubic', fill_value=\"extrapolate\")\n f_rol = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"roll\"],\n kind='cubic', 
fill_value=\"extrapolate\")\n f_pit = interp1d(sml_apx_arr.timestamp,\n sml_apx_arr[\"pitch\"],\n kind='cubic', fill_value=\"extrapolate\")\n f_yaw_agisoft = interp1d(yaw_agisoft.index.values,\n yaw_agisoft['smooth_yaw_err'],\n kind='cubic', fill_value=\"extrapolate\")\n\n MIN_RANGE = 2 #metres\n for return_counter in range(1, 3):\n if return_counter == 1:\n condition = strongest_return_ranges > MIN_RANGE\n condition_double = np.logical_and(\\\n last_return_ranges > MIN_RANGE,\n last_return_ranges != strongest_return_ranges)\n\n return_ranges = strongest_return_ranges\n return_intensities = strongest_return_intensities\n elif return_counter == 2:\n condition = np.logical_and(\\\n last_return_ranges > MIN_RANGE,\n last_return_ranges != strongest_return_ranges)\n condition_double = np.ones_like(last_return_ranges, dtype=np.bool8)\n\n return_ranges = last_return_ranges\n return_intensities = last_return_intensities\n\n lat = f_lat(relevant_times).flatten()\n lon = f_lon(relevant_times).flatten()\n ele = f_ele(relevant_times).flatten()\n yaw = f_yaw(relevant_times).flatten() - f_yaw_agisoft(relevant_times).flatten() % 360 #check the sign!\n rol = f_rol(relevant_times).flatten()\n pit = f_pit(relevant_times).flatten()\n\n X_puck = np.ones_like(return_intensities) * np.nan\n Y_puck = np.ones_like(return_intensities) * np.nan\n Z_puck = np.ones_like(return_intensities) * np.nan\n\n #VLP manual p.53\n X_puck[condition] = return_ranges[condition] * np.cos(vert_elev_angle)[condition] *\\\n np.sin(azimuth)[condition]\n Y_puck[condition] = return_ranges[condition] * np.cos(vert_elev_angle)[condition] *\\\n np.cos(azimuth)[condition]\n Z_puck[condition] = return_ranges[condition] * np.sin(vert_elev_angle)[condition]\n\n # first rotate into XYZ of the drone!\n # x_roll = -90 #degrees\n # y_pitch = 0\n # z_yaw = -90\n #rotation from puck to uav coordinates:\n R_01 = np.array([[0., 1., 0.],\n [0., 0., 1.],\n [1., 0., 0.]])\n\n #get rid of invalid entries\n X_puck = X_puck[condition]\n Y_puck = Y_puck[condition]\n Z_puck = Z_puck[condition]\n\n XYZ_puck = np.vstack((X_puck, Y_puck, Z_puck)).T\n XYZ_puck = XYZ_puck[:, np.newaxis, :]\n XYZ_uav = np.matmul(XYZ_puck, R_01)\n \n #rotation for boresight roll (in UAV coord):\n bor_rl_s = np.sin(np.radians(b_roll))\n bor_rl_c = np.cos(np.radians(b_roll))\n R_02 = np.array([[1., 0., 0.],\n [0., bor_rl_c, -bor_rl_s],\n [0., bor_rl_s, bor_rl_c]])\n XYZ_uav = np.matmul(XYZ_uav, R_02)\n \n #rotation for boresight pitch (in UAV coord):\n bor_pt_s = np.sin(np.radians(b_pitch))\n bor_pt_c = np.cos(np.radians(b_pitch))\n R_03 = np.array([[ bor_pt_c, 0., bor_pt_s],\n [ 0., 1., 0.],\n [ -bor_pt_s, 0., bor_pt_c]])\n XYZ_uav = np.matmul(XYZ_uav, R_03)\n \n #rotation for boresight yaw (in UAV coord):\n bor_yw_s = np.sin(np.radians(b_yaw))\n bor_yw_c = np.cos(np.radians(b_yaw))\n R_04 = np.array([[ bor_yw_c, -bor_yw_s, 0.],\n [ bor_yw_s, bor_yw_c, 0.],\n [ 0., 0., 1.]])\n XYZ_uav = np.matmul(XYZ_uav, R_04)\n \n #now rotate to real world...\n yaw_correction, pit_correction, rol_correction = -np.radians(yaw[condition]),\\\n -np.radians(pit[condition]),\\\n -np.radians(rol[condition])\n\n cos_gamma = np.cos(rol_correction)\n sin_gamma = np.sin(rol_correction)\n\n cos_beta = np.cos(pit_correction)\n sin_beta = np.sin(pit_correction)\n\n cos_alpha = np.cos(yaw_correction)\n sin_alpha = np.sin(yaw_correction)\n\n\n R_gamma = np.array([[ np.ones_like(cos_gamma),np.zeros_like(cos_gamma),np.zeros_like(cos_gamma)],\n [np.zeros_like(cos_gamma), cos_gamma, -sin_gamma ],\n 
[np.zeros_like(cos_gamma), sin_gamma, cos_gamma ]])\n R_gamma = np.transpose(R_gamma, (2,0,1))\n\n\n R_beta = np.array([[ cos_beta, np.zeros_like(cos_beta), sin_beta ],\n [np.zeros_like(cos_beta), np.ones_like(cos_beta) ,np.zeros_like(cos_beta)],\n [ -sin_beta, np.zeros_like(cos_beta), cos_beta ]])\n R_beta = np.transpose(R_beta, (2,0,1))\n\n\n R_alpha = np.array([[ cos_alpha , -sin_alpha ,np.zeros_like(cos_alpha)],\n [ sin_alpha , cos_alpha ,np.zeros_like(cos_alpha)],\n [np.zeros_like(cos_alpha),np.zeros_like(cos_alpha), np.ones_like(cos_alpha)]])\n R_alpha = np.transpose(R_alpha, (2,0,1))\n\n\n XYZ_rotated = np.matmul(XYZ_uav, R_gamma)\n XYZ_rotated = np.matmul(XYZ_rotated, R_beta)\n XYZ_rotated = np.matmul(XYZ_rotated, R_alpha)\n\n #bring it into East, North, Up system (+90° around z, then +180° around new x)\n R_last = np.array([[ 0., 1., 0.],\n [ 1., 0., 0.],\n [ 0., 0.,-1.]])\n\n XYZ_rotated = np.matmul(XYZ_rotated, R_last)\n\n flight_line_id = np.ones_like(\\\n vert_elev_angle[condition],\n dtype=np.uint16) * int(pcap_file_in.split(\".\")[0].split(\"_\")[-1])\n flight_line_id = flight_line_id[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, flight_line_id), axis = -1)\n\n return_id = np.ones_like(vert_elev_angle[condition], dtype=np.uint16) * return_counter\n return_id = return_id[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, return_id), axis = -1)\n\n return_intensities = return_intensities[condition]\n return_intensities = return_intensities[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, return_intensities), axis = -1)\n\n number_of_returns = np.ones_like(vert_elev_angle[condition], dtype=np.uint8) +\\\n condition_double[condition] #1 for single, 2 for double\n number_of_returns = number_of_returns[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, number_of_returns), axis = -1)\n\n #subtract 1 billion (see here https://support.geocue.com/fixing-las-global-encoding/)\n laser_times = relevant_times.flatten()[condition] - 1e9\n laser_times = laser_times[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, laser_times), axis = -1)\n\n #for angles\n delta_pos = np.copy(XYZ_rotated[:,:,0:3])\n delta_pos = np.matmul(delta_pos, R_alpha)\n\n #take delta_z as positive when looking down (normal scan)\n new_scan_angle = np.degrees(np.arctan2(delta_pos[:,0,0], - delta_pos[:,0,2]))\n #for some reason does not want to use short even though version 1.4\n new_scan_angle = np.clip(new_scan_angle, -128, +127)\n new_scan_angle = new_scan_angle[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, new_scan_angle), axis = -1)\n\n #take delta_z as positive when looking down (normal scan)\n new_along_track_angle = np.degrees(np.arctan2(delta_pos[:,0,1], - delta_pos[:,0,2]))\n new_along_track_angle = new_along_track_angle[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, new_along_track_angle), axis = -1)\n\n\n laser_id = global_laser_id.flatten()[condition]\n laser_id = laser_id[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, laser_id), axis = -1)\n \n \n XYZ_rotated[:,0,0] += lon[condition]\n XYZ_rotated[:,0,1] += lat[condition]\n XYZ_rotated[:,0,2] += ele[condition]\n\n #to easily display height in cloudcompare\n extra_elevation_field = XYZ_rotated[:,:,2].flatten()\n extra_elevation_field = extra_elevation_field[:, np.newaxis, np.newaxis]\n XYZ_rotated = np.concatenate((XYZ_rotated, extra_elevation_field), axis = -1)\n\n\n if 
return_counter == 1:\n first_returns = np.copy(XYZ_rotated)\n elif return_counter == 2:\n first_returns = np.concatenate((first_returns, XYZ_rotated))\n\n\n fname = f'{pcap_file_in.split(\".\")[0]}'\n np.savetxt(os.path.join(out_dir_ascii, f\"{fname}_r{run_count:03d}.xyz\"),\n np.squeeze(first_returns, axis = -2),\n fmt=['%.3f', '%.3f', '%.3f',\n '%3d', '%1d', '%3d', '%1d', '%.9f', '%.3f', '%.3f', '%d', '%.3f'])\n current_ascii_file = f\"{fname}_r{run_count:03d}.xyz\"\n concat_files.append(os.path.join(out_dir_ascii, current_ascii_file))\n merged_ascii_file = os.path.join(out_dir_ascii, f\"{fname}.xyz\")\n\n print(f\"{pcap_file_in}: Concatenating {len(concat_files)} ascii files\")\n start = time.time()\n command = f\"{concat_cmd} {' '.join(concat_files)} > {merged_ascii_file}\"\n logging.info(command)\n os.system(command)\n print(f\"{pcap_file_in}: Done in {(time.time() -start):.2f} seconds.\")\n\n print(f\"{pcap_file_in}: Removing {len(concat_files)} redundant ascii files\")\n start = time.time()\n for f in concat_files:\n os.remove(f)\n print(f\"{pcap_file_in}: Done in {(time.time() -start):.2f} seconds.\")\n\n merged_las_file = merged_ascii_file.replace(out_dir_ascii, out_dir_las)[:-3]+'laz'\n\n print(f\"{pcap_file_in}: Transforming the ascii file to las.\")\n start = time.time()\n command = f'{wine_cmd}txt2las -i {merged_ascii_file} -parse xyzprinta012 -add_attribute 4 \"AngleAlongTrack\" \"in degrees, positive forward, negative backward\" 0.006 -add_attribute 1 \"LaserID\" \"from 0 to 15\" -add_attribute 9 \"Elevation ASL\" \"metres above sea level\" -set_point_type 6 -o {merged_las_file}'\n logging.info(command)\n os.system(command)\n print(f\"{pcap_file_in}: Done in {(time.time() -start):.2f} seconds.\")\n\n basename = merged_las_file[:-4]\n\n print(f\"{pcap_file_in}: Adding further information and denoising\")\n start = time.time()\n command = f'{wine_cmd}lasinfo -i {merged_las_file} -set_global_encoding 1\\n'\n logging.info(command)\n os.system(command)\n command = f'{wine_cmd}las2las -i {merged_las_file} -epsg 32632 -target_epsg 32632 -o {basename}_epsg.laz\\n'\n logging.info(command)\n os.system(command)\n command = f'{wine_cmd}lasnoise -i {basename}_epsg.laz -remove_noise -o {basename}_denoised.laz\\n'\n logging.info(command)\n os.system(command)\n print(f\"{pcap_file_in}: Done in {(time.time() -start):.2f} seconds.\")\n\n print(f\"Finished processing file {os.path.join(pcap_dir_in, pcap_file_in)}\")\n logging.info(f\"Finished processing file {os.path.join(pcap_dir_in, pcap_file_in)}\")\n\n\nif __name__=='__main__':\n #this_script = sys.argv[0].strip().split(os.path.sep)[-1]\n this_script = str(sys.argv[0])\n print(this_script)\n # sys.exit(0)\n\n if len(sys.argv)<=1:\n print(\"ERROR\\n\\nPlease provide the absolute path of the campaign's top directory.\\n\\nEXITING\")\n sys.exit(0)\n else:\n try: top_dir = sys.argv[1].rstrip(os.path.sep)\n except Exception as e: print(e); sys.exit(0)\n\n if sys.byteorder == 'little':\n print(f\"System endianness: {sys.byteorder}.\\nGood to go :)\")\n else:\n print(f\"System endianness: {sys.byteorder}.\\nSTOP right now!\")\n sys.exit(0)\n\n #can also modify the following instead of passing the argument\n boresight_str = \"Rp1050000Pp0200000Yn1900000\"\n\n if len(sys.argv)>2:\n try: boresight_str = sys.argv[2]\n except Exception as e:\n print(f\"failed: {e}.....{e.args}\")\n sys.exit(0)\n\n try:\n scale_factor = 1e-6\n boresight_roll = scale_factor * int(boresight_str[2:9]) *\\\n (1 if boresight_str[1] == \"p\" else -1)\n boresight_pitch = 
scale_factor * int(boresight_str[11:18]) *\\\n                (1 if boresight_str[10] == \"p\" else -1)\n        boresight_yaw = scale_factor * int(boresight_str[20:27]) *\\\n                (1 if boresight_str[19] == \"p\" else -1)\n        print(f\"Using the following boresight angles: Roll: {boresight_roll}, Pitch: {boresight_pitch}, Yaw: {boresight_yaw}\")\n    except Exception as e:\n        print(f\"failed: {e}.....{e.args}\")\n        sys.exit(0)\n\n\n\n    os.chdir(top_dir)\n    if os.getcwd() != top_dir:\n        print(\"Something went wrong.\")\n        print(f\"cwd: {os.getcwd()}\")\n        sys.exit(0)\n\n    if log_dir not in os.listdir():\n        os.mkdir(log_dir)\n    while out_dir_ascii in os.listdir():\n        print(f\"{out_dir_ascii} already existing.\")\n        time.sleep(0.1)\n        out_dir_ascii = f\"{out_dir_ascii[:out_ascii_base_len]}_{str(datetime.datetime.now()).replace(':', '.').replace(' ', '_')}\"\n    os.mkdir(out_dir_ascii)\n    with open(os.path.join(out_dir_ascii, \"00_boresight_angles.txt\"), 'w') as fh:\n        fh.write(boresight_str)\n        fh.write(nl)\n        fh.write(f\"Using the following boresight angles: Roll: {boresight_roll}, Pitch: {boresight_pitch}, Yaw: {boresight_yaw}\")\n\n    while out_dir_las in os.listdir():\n        print(f\"{out_dir_las} already existing.\")\n        time.sleep(0.1)\n        out_dir_las = f\"{out_dir_las[:out_las_base_len]}_{str(datetime.datetime.now()).replace(':', '.').replace(' ', '_')}\"\n    os.mkdir(out_dir_las)\n    shutil.copy2(os.path.join(out_dir_ascii, \"00_boresight_angles.txt\"), out_dir_las)\n\n    logging.basicConfig(format='<%(asctime)s> <%(levelname)-8s> <%(message)s>',\n                        level=logging.DEBUG,\n                        filename=os.path.join(top_dir, log_dir, f'{datetime.date.today().strftime(\"%Y.%m.%d\")}_{os.path.split(this_script)[-1]}.log'),\n                        datefmt='%Y-%m-%d %H:%M:%S')\n    logging.info(f\"{this_script} has started and received a valid top directory name\")\n    logging.info(f\"Using the following boresight angles: Roll: {boresight_roll}, Pitch: {boresight_pitch}, Yaw: {boresight_yaw}\")\n\n    potential_dirs_in = len(re.findall(txt_dir_in, \" \".join(os.listdir())))\n    if potential_dirs_in == 0:\n        print(\"No valid input directories found. Make sure to run script 00_join_pcap_.. first\\nEXITING!\")\n    elif potential_dirs_in > 1:\n        print(\"Multiple input directories found. Choose one:\")\n        valid_choices = dict()\n        for i, d_name in enumerate(os.listdir()):\n            if os.path.isfile(d_name):\n                continue\n            elif txt_dir_in not in d_name:\n                continue\n            else:\n                print(f\"{i}: {d_name}\")\n                valid_choices[i] = d_name\n\n        dir_in_selected = False\n        while not dir_in_selected:\n            try:\n                dir_in_id = int(input(\"Which directory do we use? Type a number and press enter.\\n\"))\n                if dir_in_id in valid_choices.keys():\n                    txt_dir_in = valid_choices[dir_in_id]\n                    dir_in_selected = True\n            except:\n                print(\"Invalid choice!\")\n    potential_pcap_dirs_in = len(re.findall(pcap_dir_in, \" \".join(os.listdir())))\n    if potential_pcap_dirs_in == 0:\n        print(\"No valid PCAP input directories found. Make sure to run the other scripts first\\nEXITING!\")\n    elif potential_pcap_dirs_in > 1:\n        print(\"Multiple input directories found. Choose one:\")\n        valid_choices = dict()\n        for i, d_name in enumerate(os.listdir()):\n            if os.path.isfile(d_name):\n                continue\n            elif pcap_dir_in not in d_name:\n                continue\n            else:\n                print(f\"{i}: {d_name}\")\n                valid_choices[i] = d_name\n\n        dir_in_selected = False\n        while not dir_in_selected:\n            try:\n                dir_in_id = int(input(\"Which directory do we use? 
Type a number and press enter.\\n\"))\n                    if dir_in_id in valid_choices.keys():\n                        pcap_dir_in = valid_choices[dir_in_id]\n                        dir_in_selected = True\n                except:\n                    print(\"Invalid choice!\")\n\n    print(f\"Using {txt_dir_in} and {pcap_dir_in} as input\")\n    logging.info(f\"Using {txt_dir_in} and {pcap_dir_in} as input\")\n\n    this_os = platform.system()\n    if this_os == \"Linux\":\n        concatenate_command = \"cat\"\n        wine_command = \"wine \"\n    elif this_os == \"Windows\":\n        concatenate_command = \"type\"\n        wine_command = \"\"\n    else:\n        print(\"Unknown OS. Terminating.\")\n        sys.exit(0)\n    print(f\"Running on {this_os}. To concatenate we will use '{concatenate_command}'\")\n    print(f\"Calling lastools with e.g. '{wine_command}lastool'\")\n\n\n    print(\"Making APX dataframe...\")\n    csv_files = [f for f in sorted(os.listdir(txt_dir_in)) if fn_keyword in f]\n    print(f\"Found {len(csv_files)} CSV files.\")\n    big_apx_df = pd.concat(\\\n        (pd.read_csv(os.path.join(txt_dir_in, f),\n                     sep=\";\", index_col=0, na_values=\"NAN\") for f in csv_files))\n    big_apx_df = big_apx_df[[\"lon_EPSG32632\",\n                             \"lat_EPSG32632\",\n                             \"elevation\",\n                             \"heading_continuous\",\n                             \"roll\",\n                             \"pitch\"]]\n    glob_apx_arr = big_apx_df.to_records()\n    del big_apx_df\n    shm_shape, shm_dtype = glob_apx_arr.shape, glob_apx_arr.dtype\n    print(f\"Done concatenating {len(csv_files)} CSV files into dataframe.\")\n    print(\"\\n\\n\\n\")\n    logging.info(f\"Used {csv_files} from {txt_dir_in}\")\n\n    with SharedMemoryManager() as smm:\n        shm = smm.SharedMemory(glob_apx_arr.nbytes)\n        shm_apx_arr = np.recarray(shape=shm_shape, dtype=shm_dtype, buf=shm.buf)\n        np.copyto(shm_apx_arr, glob_apx_arr)\n\n        fnames = sorted([fn for fn in os.listdir(pcap_dir_in) if \"line_\" in fn\\\n                         and len(fn) == 13 and \"pcap\" in fn])\n\n        func_args = zip(fnames,\n                        [pcap_dir_in] * len(fnames),\n                        [out_dir_ascii] * len(fnames),\n                        [out_dir_las] * len(fnames),\n                        [shm.name] * len(fnames),\n                        [shm_shape] * len(fnames),\n                        [shm_dtype] * len(fnames),\n                        [boresight_roll] * len(fnames),\n                        [boresight_pitch] * len(fnames),\n                        [boresight_yaw] * len(fnames),\n                        [concatenate_command] * len(fnames),\n                        [wine_command] * len(fnames))\n        with Pool(processes=cpu_count() * 9 // 10) as pool:\n            results = pool.starmap(process_file, func_args)\n        print(results)\n\n","sub_path":"10_pcap_to_point_cloud/04_get_lidar_02.py","file_name":"04_get_lidar_02.py","file_ext":"py","file_size_in_byte":39247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"129166410","text":"from django.test import TestCase\nfrom django.utils import timezone\n\n# Create your tests here.\nfrom .models import Course\n\nclass CourseModelTests(TestCase):\n    def test_course_creation(self):\n        course = Course.objects.create(\n            title = \"Python Regular Expressions\",\n            desc = \"Learn regex in Python\",\n        )\n        now = timezone.now()\n        self.assertLess(course.create_at, now)\n","sub_path":"courses/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"140538116","text":"import tensorflow.keras as tf\n\nfrom tensorflow.keras.layers import (\n    Conv2D,\n    Dense,\n    Dropout,\n    Flatten,\n    Input,\n    BatchNormalization,\n    MaxPooling2D,\n)\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import Model\n\nfrom src.models.nn_models import NnModel\nfrom src.models.residual_layer import Residual\n\n\nclass ResNetParam334k(NnModel):\n    def __init__(\n        self,\n        N1,\n        N2,\n        kernel_size1,\n        
strides1,\n        pool_size1,\n        kernel_size2,\n        dilation_rate1,\n        dilation_rate2,\n        dilation_rate3,\n        dilation_rate4,\n        Nfc1,\n        Nfc2,\n        dropout1,\n        dropout2,\n    ):\n\n        \"\"\"\n\n        :param N1:\n        :param N2:\n        :param kernel_size1:\n        :param strides1:\n        :param pool_size1:\n        :param kernel_size2:\n        :param dilation_rate1:\n        :param dilation_rate2:\n        :param dilation_rate3:\n        :param dilation_rate4:\n        :param Nfc1:\n        :param Nfc2:\n        :param dropout1:\n        :param dropout2:\n        \"\"\"\n\n        self.N1 = N1\n        self.N2 = N2\n        self.kernel_size1 = kernel_size1\n        self.strides1 = strides1\n        self.pool_size1 = pool_size1\n        self.kernel_size2 = kernel_size2\n        self.Nfc1 = Nfc1\n        self.Nfc2 = Nfc2\n        self.dilation_rate1 = dilation_rate1\n        self.dilation_rate2 = dilation_rate2\n        self.dilation_rate3 = dilation_rate3\n        self.dilation_rate4 = dilation_rate4\n        self.dropout1 = dropout1\n        self.dropout2 = dropout2\n        super().__init__()\n        self.model_out = None\n\n    def model_architecture(self):\n\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        model_input = Input(shape=self.input_shape)\n\n        model = Conv2D(\n            self.N1,\n            kernel_size=self.kernel_size1,\n            strides=self.strides1,\n            activation=\"relu\",\n            padding=\"same\",\n        )(model_input)\n\n        model = BatchNormalization(axis=-1, scale=None)(model)\n\n        model = Residual(self.N1, self.kernel_size1, self.dilation_rate1)(model)\n\n        model = Residual(self.N1, self.kernel_size1, self.dilation_rate2)(model)\n\n        model = Residual(self.N1, self.kernel_size1, self.dilation_rate3)(model)\n\n        model = Conv2D(\n            self.N2,\n            kernel_size=self.kernel_size2,\n            activation=\"relu\",\n            dilation_rate=self.dilation_rate4,\n        )(model)\n\n        model = BatchNormalization(axis=-1, scale=None)(model)\n\n        model = MaxPooling2D(pool_size=self.pool_size1)(model)\n\n        model = Flatten()(model)\n\n        model = Dense(self.Nfc1, activation=\"relu\")(model)\n\n        model = Dropout(self.dropout1)(model)\n\n        model = Dense(self.Nfc2, activation=\"relu\")(model)\n\n        model = Dropout(self.dropout2)(model)\n\n        out = Dense(self.out, activation=\"softmax\")(model)\n\n        self.model_out = Model(inputs=[model_input], outputs=out)\n\n    def model_compile(\n        self,\n        learning_rate: float = 1e-4,\n        beta1: float = 0.9,\n        beta2: float = 0.999,\n        epsilon: float = 1e-8,\n    ) -> tf.Model:\n\n        \"\"\"\n        Function to compile the model\n\n        :param learning_rate: Initial learning rate for ADAM optimizer\n        :param beta1: Exponential decay rate for the running average of the gradient\n        :param beta2: Exponential decay rate for the running average of the square of the gradient\n        :param epsilon: Epsilon parameter to prevent division by zero error\n        :return: Compiled Keras model\n        \"\"\"\n\n        adam = Adam(learning_rate=learning_rate, beta_1=beta1, beta_2=beta2, epsilon=epsilon)\n        self.model_out.compile(\n            loss=\"sparse_categorical_crossentropy\",\n            optimizer=adam,\n            metrics=[\"sparse_categorical_accuracy\"],\n        )\n\n        return self.model_out\n","sub_path":"src/models/resnet_334k.py","file_name":"resnet_334k.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"245894446","text":"#!/usr/bin/env python3\nfrom collections import Counter, defaultdict\n\n\ndef solution1():\n    visited = {}\n\n    while True:\n        user_input = input(\"Tell me where you went: \")\n        if not user_input:\n            break\n        else:\n            try:\n                city, country = user_input.split(\", \")\n                visited.setdefault(country, []).append(city)\n            except ValueError:\n                print(\"That's not a legal city, country combination\")\n                continue\n\n    print(\"\\nYou visited:\")\n    for country, cities in 
sorted(visited.items()):\n        print(country)\n        for city, count in sorted(Counter(cities).items()):\n            print(f\"\\t{city} ({count})\") if count > 1 else print(f\"\\t{city}\")\n\n\ndef solution2():\n    visits = defaultdict(Counter)\n\n    while True:\n\n        location = input(\"Tell me where you went: \").strip()\n\n        if not location:\n            break\n\n        if location.count(',') != 1:\n            print(\"That's not a legal city, country combination\")\n            continue\n\n        city, country = location.split(',')\n\n        visits[country.strip()][city.strip()] += 1\n\n    for country, cities in sorted(visits.items()):\n        print(country)\n        for one_city, count in sorted(cities.items()):\n            if count == 1:\n                print(f\"\\t{one_city}\")\n            else:\n                print(f\"\\t{one_city} ({count})\")\n\n\nif __name__ == '__main__':\n    solution1()\n","sub_path":"src/organaize_travel.py","file_name":"organaize_travel.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"314980938","text":"filename = 'pi_million_digits.txt'\nwith open(filename) as file_object:\n    lines = file_object.readlines()\n\npi_string = ''\nfor line in lines:\n    pi_string += line.strip()\n\nbirthday = input(\"Type your birthdate in the format ddmmyy: \")\nif birthday in pi_string:\n    print(\"Your birthdate appears in the first million digits of pi!\")\nelse:\n    print(\"I am sorry, you are unlucky\")\n","sub_path":"crash_course/files/pi_birthday.py","file_name":"pi_birthday.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"533325097","text":"# -*- coding: utf-8 -*-\nprint(__doc__)\n\n# Author: RACHED Anis \n# License: MIT\n\n# DATA ANALYSIS: 2D DATA VISUALIZATION\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Simulation to study\nSIM = 'SIM3'\nTR_GEN_METH = 'GAUS'\nSTATE = 'INIT'\n\n# Analyze the function F(X) = Y\n\n# X is\nX_LAB = 'Essai'\n\n# Y is\nY_LAB = 'TR'\n\ndef mask(df, f):\n    return df[f(df)]\n\n# import the data\nDATA_TO_ANALYSE = TR_GEN_METH+'-TR_'+SIM+'_'+STATE\nPATH_TO_DATA = './out/'+SIM+'/'+DATA_TO_ANALYSE+'.csv'\ndata = pd.read_csv(PATH_TO_DATA)\n\n# Uncomment to filter the data \n#FILTER = 'Correct==1 and TR!=-1' \n#data = data.query(FILTER)\n\nX = data[X_LAB]\nY = data[Y_LAB]\n\ndirectory = './out/'+SIM+'/'\nif not os.path.exists(directory):\n    os.makedirs(directory)\n\n# plot Y_LAB as a function of X_LAB\npts = plt.figure(1)\nplt.xlabel(X_LAB)\nplt.ylabel(Y_LAB)\nLABEL = TR_GEN_METH+'-TR_'+SIM+'_ANALYSE:'+Y_LAB+'=F('+X_LAB+')'\nplt.title(LABEL)\nplt.scatter(X, Y)\n\n# Save the results\npts.savefig(directory+LABEL+'.png')\n\nplt.show()","sub_path":"analyseData2D.py","file_name":"analyseData2D.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"125966577","text":"import re\nimport tokenize\nfrom collections import Counter\nfrom lib2to3 import pytree\nfrom lib2to3.pgen2.driver import Driver\nfrom lib2to3.pygram import python_grammar\n\ntry:\n    import pycodestyle as pep8\nexcept ImportError:\n    import pep8\n\nfrom flake8_class.__about__ import __version__\n\n\n_driver = Driver(\n    grammar=python_grammar,\n    convert=pytree.convert,\n)\n\n\nclass ClassChecker(object):\n    name = __name__\n    version = __version__\n\n    def C000(self, start_row, start_col): # noqa\n        return {\n            'message': self.C000.message,\n            'line': start_row,\n            'col': start_col,\n        }\n    
C000.message = 'C000 Fix class declaration.'\n\n def __init__(self, tree, filename='(none)', builtins=None):\n self.filename = filename\n\n def get_file_contents(self):\n if self.filename in ('stdin', '-', None):\n return pep8.stdin_get_value().splitlines(True)\n else:\n return pep8.readlines(self.filename)\n\n def run(self):\n file_contents = self.get_file_contents()\n\n noqa_line_numbers = self.get_noqa_lines(file_contents)\n errors = self.get_class_errors(file_contents)\n\n for error in errors:\n if error.get('line') not in noqa_line_numbers:\n yield (error.get('line'), error.get('col'), error.get('message'), type(self))\n\n def get_noqa_lines(self, file_contents):\n tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]\n return [token.start_row\n for token in tokens\n if token.type == tokenize.COMMENT and token.string.endswith('noqa')]\n\n def get_class_errors(self, file_contents):\n tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]\n\n gen = iter(tokens)\n\n while True:\n try:\n token = next(gen)\n except StopIteration:\n break\n\n if token.type != tokenize.NAME:\n # ignore non names\n continue\n\n if token.string != 'class':\n # ignore non class declaration\n continue\n\n scope = [token]\n\n start_row, start_col = token.start\n\n last_colon = False\n\n while True:\n try:\n token = next(gen)\n except StopIteration:\n raise NotImplementedError\n else:\n if last_colon:\n if token.type == tokenize.NEWLINE:\n break\n\n scope.append(token)\n\n if token.string == ':':\n last_colon = True\n else:\n last_colon = False\n\n scope = scope[2:-1]\n\n if not scope:\n continue\n\n code = [el.string for el in scope]\n\n assert code[0] == '('\n assert code[-1] == ')'\n\n code = code[1:-1]\n scope = scope[1:-1]\n\n if not code:\n continue\n\n if '\\n' != code[0]:\n yield self.C000(start_row, start_col)\n continue\n\n nls = Counter(code)['\\n']\n\n source = ''.join(code).replace('\\n', ',')\n\n source = re.sub(r'\\,{2}', ',', source)\n\n if source[0] == ',':\n source = ''.join(source.split()[1:])\n\n source = '\\nclass CLS(' + source + '):pass\\n'\n\n tree = _driver.parse_string(source)\n\n nodes = len(tree.children[0].children[3].children)\n\n if nodes != 0:\n if nodes != nls:\n yield self.C000(start_row, start_col)\n continue\n\n\nclass Token:\n '''Python 2 and 3 compatible token'''\n def __init__(self, token):\n self.token = token\n\n @property\n def type(self):\n return self.token[0]\n\n @property\n def string(self):\n return self.token[1]\n\n @property\n def start(self):\n return self.token[2]\n\n @property\n def start_row(self):\n return self.token[2][0]\n\n @property\n def start_col(self):\n return self.token[2][1]\n","sub_path":"flake8_class/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"246467573","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nclass Fun:\r\n \"\"\"My custom cog that does stuff!\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.command()\r\n async def wot(self):\r\n \"\"\"wot u mean.\"\"\"\r\n\r\n #Your code will go here\r\n await self.bot.say(\"https://tse1.mm.bing.net/th?id=OIP.M51635a66af420d4b69e5ca4a5b421307o0&w=234&h=187&c=7&rs=1&qlt=90&o=4&pid=1.1\")\r\n\r\ndef setup(bot):\r\n 
bot.add_cog(Fun(bot))","sub_path":"wot.py","file_name":"wot.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"284674133","text":"from odoo import models, fields, api, _\n\n\nclass AccountMove(models.Model):\n    _inherit = 'res.partner'\n\n    number_of_rotrut = fields.Integer(string=\"Rot/Rut fakturor\", compute='_rotrut_count')\n\n\n    def _rotrut_count(self):\n        rotrut_count = self.env['account.move'].search(['|', ('partner_id', '=', self.id), ('partner_id', 'in', self.child_ids.ids), ('is_rotrut','=',True)])\n        self.number_of_rotrut = len(rotrut_count)\n\n    def rotrut_contact_view(self):\n        return {\n            'type': 'ir.actions.act_window',\n            'name': 'Rot/Rut fakturor',\n            'view_mode': 'tree,form',\n            'res_model': 'account.move',\n            'domain': ['|', ('partner_id', '=', self.id), ('partner_id', 'in', self.child_ids.ids), ('is_rotrut','=',True)]\n        }\n","sub_path":"l10n_se_tax_report_rotrut/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"426386671","text":"# Author: Claudio Mayoral García\r\n# This is a program with a menu of different options to choose from. Each one\r\n# calls a particular routine.\r\n\r\n\r\nimport pygame # pygame library\r\nimport random # Library for random numbers\r\nimport math # Library for python math\r\n\r\n\r\n# Screen dimensions\r\nANCHO = 800\r\nALTO = 800\r\n# Colors\r\nBLANCO = (255, 255, 255) # R,G,B in the range [0,255], 0 absence of color, 255 full intensity\r\nNEGRO = (0, 0, 0)\r\n\r\n\r\n#This function returns a random color\r\ndef ponerColor():\r\n    x = random.randint(0, 255)\r\n    y = random.randint(0, 255)\r\n    z = random.randint(0, 255)\r\n    color = (x, y, z) #Gives random values for each color component\r\n    return color\r\n\r\n\r\n#Function that draws a parabola with randomly colored lines\r\ndef dibujarParabola():\r\n    # Initialize the pygame engine\r\n    pygame.init()\r\n    # Create an ANCHO x ALTO window\r\n    ventana = pygame.display.set_mode((ANCHO, ALTO)) # Create the window where it will draw\r\n    reloj = pygame.time.Clock() # To cap the fps\r\n    termina = False # Flag to know whether execution ends; we start assuming it does not\r\n\r\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\r\n        # Process incoming events\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT: # The user clicked the close button\r\n                termina = True # We want to end the loop\r\n\r\n        ventana.fill(BLANCO)\r\n        for x in range(0, 400 + 1, 10):\r\n            pygame.draw.line(ventana, ponerColor(), (x, 400), (400, 400 - x))\r\n        for x in range(0, 400 + 1, 10):\r\n            pygame.draw.line(ventana, ponerColor(), (x, 400), (400, 400 + x))\r\n        for x in range(0, 400 + 1, 10):\r\n            pygame.draw.line(ventana, ponerColor(), (800 - x, 400), (400, 400 - x))\r\n        for x in range(0, 400 + 1, 10):\r\n            pygame.draw.line(ventana, ponerColor(), (800 - x, 400), (400, 400 + x))\r\n\r\n        pygame.display.flip()\r\n        reloj.tick(40)\r\n    pygame.quit()\r\n\r\n\r\n#Function that draws circles and squares that grow in size by 10 pixels\r\ndef dibujarCirculo():\r\n    # Initialize the pygame engine\r\n    pygame.init()\r\n    # Create an ANCHO x ALTO window\r\n    ventana = pygame.display.set_mode((ANCHO, ALTO)) # Create the window where it will draw\r\n    reloj = pygame.time.Clock() # To cap the fps\r\n    termina = False # Flag to know whether execution ends; we start assuming it does not\r\n\r\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\r\n        # Process incoming events\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT: # The user clicked the close button\r\n                termina = True # We want to end the loop\r\n        ventana.fill(BLANCO)\r\n        pygame.draw.circle(ventana, NEGRO, (400, 400), 1, 1)\r\n        for radio in range(1, 40):\r\n            pygame.draw.circle(ventana, NEGRO, (400, 400), radio*10, 1)\r\n\r\n        for x in range(390, 0, -10):\r\n            pygame.draw.rect(ventana, NEGRO, (x, x, (400 - x) * 2, (400 - x) * 2), 1)\r\n        pygame.display.flip()\r\n        reloj.tick(1)\r\n    pygame.quit()\r\n\r\n\r\n#Function that draws a spiral\r\ndef dibujarPiramide():\r\n    # Initialize the pygame engine\r\n    pygame.init()\r\n    # Create an ANCHO x ALTO window\r\n    ventana = pygame.display.set_mode((ANCHO, ALTO)) # Create the window where it will draw\r\n    reloj = pygame.time.Clock() # To cap the fps\r\n    termina = False # Flag to know whether execution ends; we start assuming it does not\r\n\r\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\r\n        # Process incoming events\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT: # The user clicked the close button\r\n                termina = True # We want to end the loop\r\n\r\n        ventana.fill(BLANCO)\r\n\r\n        for x in range(0, ANCHO + 1, 10):\r\n            if x < 400:\r\n                pygame.draw.line(ventana, NEGRO, (800 - x, 800 - x), (0 + x, 800 - x))\r\n        for x in range(0, ANCHO + 1, 10):\r\n            if x < 400:\r\n                pygame.draw.line(ventana, NEGRO, (0 + x, 800 - x), (0 + x, x))\r\n        for x in range(0, ANCHO + 1, 10):\r\n            if x > 400:\r\n                pygame.draw.line(ventana, NEGRO, (800 - x, 800 - x), (x - 10, 800 - x))\r\n        for x in range(0, ANCHO + 1, 10):\r\n            if x > 400:\r\n                pygame.draw.line(ventana, NEGRO, (0 + x - 10, 800 - x), (0 + x - 10, x - 10))\r\n        pygame.draw.line(ventana, BLANCO, (790, 0), (790, 789), 1)\r\n        pygame.draw.line(ventana, BLANCO, (0, 0), (0, 800), 1)\r\n        pygame.draw.line(ventana, BLANCO, (0, 0), (800, 0), 1)\r\n\r\n        pygame.display.flip()\r\n        reloj.tick(1)\r\n    pygame.quit()\r\n\r\n\r\n#Function that draws twelve circles intersecting at the center of the figure\r\ndef dibujarCirculos():\r\n    # Initialize the pygame engine\r\n    pygame.init()\r\n    # Create an ANCHO x ALTO window\r\n    ventana = pygame.display.set_mode((ANCHO, ALTO)) # Create the window where it will draw\r\n    reloj = pygame.time.Clock() # To cap the fps\r\n    termina = False # Flag to know whether execution ends; we start assuming it does not\r\n\r\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\r\n        # Process incoming events\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT: # The user clicked the close button\r\n                termina = True # We want to end the loop\r\n\r\n        ventana.fill(BLANCO)\r\n        for teta in range(0, 360, 30):\r\n            radianes = (teta * math.pi)/180 #Convert to radians\r\n            y = int(math.sin(radianes) * 150)\r\n            x = int(math.cos(radianes) * 150)\r\n            pygame.draw.circle(ventana, NEGRO, (400 + x, 400 + y), 150, 1)\r\n\r\n        pygame.display.flip()\r\n        reloj.tick(1)\r\n    pygame.quit()\r\n\r\n\r\n#Function that returns how many three-digit numbers are divisible by 19\r\ndef tresDigitos():\r\n    numero = 0\r\n    for m in range(114, 1000, 19):\r\n        numero = numero+19\r\n    cantidadDeVeces = numero//19\r\n    print(\"El número de veces que 19 es divisible en números de 3 dígitos es: \", cantidadDeVeces)\r\n    print(\"\")\r\n    return cantidadDeVeces\r\n\r\n\r\n#Function that takes the number of terms and returns the approximation\r\ndef aproximarPI():\r\n    terminos = int(input(\"Numero de terminos: \"))\r\n    suma = 0 #accumulator\r\n    for n in range(1, terminos+1):\r\n        suma += (1/n**4) # suma = suma + 1/n**4\r\n\r\n    ap = (suma*90)**0.25\r\n    print(\"PI = \", ap)\r\n\r\n\r\n#Function that prints pyramids of multiplications\r\ndef imprimirPiramides():\r\n    numero = 0\r\n    for x in range(1, 10):\r\n        numero = numero * 10 + x\r\n        resultado = numero * 8 + x\r\n        print(\"%d x 8 + %d = \" % (numero, x), resultado)\r\n    print(\"\")\r\n    numero2 = 1\r\n    for multiplo in range(1, 10):\r\n        resultado = numero2 * numero2\r\n        print(\"%d x %d = \" % (numero2, numero2), resultado)\r\n        numero2 = numero2 * 10 + 1\r\n    print(\"\")\r\n\r\n\r\n#Function that prints the menu options to choose from\r\ndef elegirOpcion():\r\n    print(\"1. Dibujar cuadros y círculos\")\r\n    print(\"2. Dibujar parábolas \")\r\n    print(\"3. Dibujar espiral\")\r\n    print(\"4. Dibujar círculo\")\r\n    print(\"5. Aproximar PI\")\r\n    print(\"6. Contar divisibles entre 19\")\r\n    print(\"7. Imprimir pirámides de números\")\r\n    print(\"0. Salir \")\r\n    opcion = int(input(\"¿Qué desea hacer? \"))\r\n    print(\"\")\r\n    return opcion\r\n\r\n\r\n#Main function\r\ndef main():\r\n    opcion = elegirOpcion()\r\n    while opcion != 0:\r\n        if opcion == 1:\r\n            dibujarCirculo()\r\n        elif opcion == 2:\r\n            dibujarParabola()\r\n        elif opcion == 3:\r\n            dibujarPiramide()\r\n        elif opcion == 4:\r\n            dibujarCirculos()\r\n        elif opcion == 5:\r\n            aproximarPI()\r\n        elif opcion == 6:\r\n            tresDigitos()\r\n        elif opcion == 7:\r\n            imprimirPiramides()\r\n        else:\r\n            print(\"\")\r\n            print(\"No es una opción válida\")\r\n            print(\"\")\r\n        opcion = elegirOpcion()\r\n    print(\"El programa ha terminado\")\r\n\r\n\r\n#Call the main function\r\nmain()\r\n","sub_path":"Mision5.py","file_name":"Mision5.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"462031835","text":"\"\"\"\nJSON:\nUsed, for example, to exchange data between applications.\nAlso used to serialize complex structures, which can be deserialized with the eval() function.\n\nXML:\nExtensible markup language.\nUsed by programs to create and process documents while remaining convenient for a human\nto read and write.\nCalled extensible because it does not fix the markup used in documents (a developer can create markup\nto suit the needs of a specific domain, constrained only by the syntax rules of the language).\n\nYAML:\nMainly used as a format for configuration files.\nAlso the main language for describing classes, resources, and manifests for application packages.\n\"\"\"\n\n\ndef parse_cook_book(file_content):\n    slovar = {}\n    slovar_lst = []\n    cook_book = {}\n    dish = \"\"\n    for i, item in enumerate(file_content):\n        tmpstr = item[0]\n        if tmpstr.isdigit():\n            for j in range(int(tmpstr)):\n                dish = file_content[i-1]\n                item = file_content[i+j+1]\n                slovar.update({'ingredient_name': item[0]})\n                slovar.update({'quantity': item[1]})\n                slovar.update({'measure': item[2]})\n                slovar_lst.append(slovar.copy())\n                slovar.clear()\n            
cook_book.update({dish: slovar_lst.copy()})\n slovar_lst.clear()\n return cook_book\n\n\ndef read_file():\n file_content = []\n with open('dishes.txt', 'r', encoding='UTF-8') as my_file:\n for line in my_file:\n if \"|\" in line:\n line = line.split('|')\n for idx, item in enumerate(line):\n line[idx] = item.strip()\n file_content.append(line)\n else:\n file_content.append(line.strip())\n return file_content\n\n\ndef print_cook_book(cook_book):\n print('-------------- COOK BOOK ----------------------')\n for key in cook_book:\n print(\"\\nDish:\", key)\n dd = cook_book.get(key)\n print(\"ingredients:\")\n j = 0\n for ingrs in dd:\n j += 1\n print(\"[{0}] {1}\".format(j, ingrs.get('ingredient_name')))\n\nprint_cook_book(parse_cook_book(read_file()))\n","sub_path":"Exercise_2.1.py","file_name":"Exercise_2.1.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"221790421","text":"import asyncio\nimport datetime\nimport logging\nimport sys\nfrom typing import Dict\n\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.utils.exceptions import TelegramAPIError\nfrom apscheduler.jobstores.redis import RedisJobStore\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom inline_timepicker.inline_timepicker import InlineTimepicker\nfrom loguru import logger\n\nimport core.reply_markups as markups\nfrom core import strings\nfrom core.configs import telegram, database, consts\nfrom core.database import db_worker as db\nfrom core.database.models import user_model\nfrom core.reply_markups.callbacks.language_choice import language_callback\nfrom core.reply_markups.inline import available_languages as available_languages_markup\nfrom core.strings.scripts import _\nfrom core.utils import decorators\nfrom core.utils.middlewares import (\n update_middleware,\n logger_middleware\n)\nfrom core.utils.states import (\n MailingEveryoneDialog,\n SetCleaningReminderStates,\n OffCleaningReminderStates,\n ChooseLanguageDialog,\n)\n\nlogging.basicConfig(format=\"[%(asctime)s] %(levelname)s : %(name)s : %(message)s\",\n level=logging.DEBUG, datefmt=\"%Y-%m-%d at %H:%M:%S\")\n\nlogger.remove()\nlogger.add(consts.LOGS_FOLDER / \"debug_logs.log\", format=\"[{time:YYYY-MM-DD at HH:mm:ss}] {level}: {name} : {message}\",\n level=logging.DEBUG,\n colorize=False)\nlogger.add(consts.LOGS_FOLDER / \"info_logs.log\", format=\"[{time:YYYY-MM-DD at HH:mm:ss}] {level}: {name} : {message}\",\n level=logging.INFO,\n colorize=False)\nlogger.add(consts.LOGS_FOLDER / \"warn_logs.log\", format=\"[{time:YYYY-MM-DD at HH:mm:ss}] {level}: {name} : {message}\",\n level=logging.WARNING,\n colorize=False)\nlogger.add(consts.LOGS_FOLDER / \"error_logs.log\", format=\"[{time:YYYY-MM-DD at HH:mm:ss}] {level}: {name} : {message}\",\n level=logging.ERROR,\n colorize=False)\nlogger.add(sys.stderr, format=\"[{time:YYYY-MM-DD at HH:mm:ss}] {level}: {name} : {message}\", level=logging.ERROR,\n colorize=False)\n\nlogging.getLogger('aiogram').setLevel(logging.WARNING)\n\nloop = asyncio.get_event_loop()\nbot = Bot(telegram.BOT_TOKEN, loop=loop, parse_mode=types.ParseMode.HTML)\n\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n# additional helpers\nscheduler = AsyncIOScheduler(timezone=consts.default_timezone, coalesce=True, misfire_grace_time=10000)\nscheduler.add_jobstore(RedisJobStore(db=1,\n host=database.REDIS_HOST,\n 
port=database.REDIS_PORT))\nscheduler.start()\n\ninline_timepicker = InlineTimepicker()\n\n\n@dp.message_handler(state='*', commands=['cancel'])\n@dp.message_handler(lambda msg: msg.text.lower() == 'cancel', state='*')\nasync def cancel_handler(msg: types.Message, state: FSMContext):\n await state.finish()\n await bot.send_message(msg.from_user.id, _(\"cancel\"))\n\n\n@dp.message_handler(commands=['start'], state='*')\nasync def start_command_handler(msg: types.Message):\n await bot.send_message(msg.chat.id, _(\"start_cmd_text\"))\n\n\n@dp.message_handler(commands=['help'], state='*')\nasync def help_command_handler(msg: types.Message):\n user = await db.get_user(chat_id=msg.from_user.id)\n await bot.send_message(msg.chat.id, _(\"help_cmd_text, formats: {name}\").format(name=user.first_name))\n\n\n@dp.message_handler(commands='language', state='*')\nasync def language_cmd_handler(msg: types.Message):\n await bot.send_message(msg.from_user.id,\n text=_(\"choose language\"),\n reply_markup=available_languages_markup)\n ChooseLanguageDialog.enter_language_callback.set()\n\n\n@dp.callback_query_handler(language_callback.filter(), state=ChooseLanguageDialog.enter_language_callback)\nasync def language_choice_handler(query: types.CallbackQuery, state: FSMContext, callback_data: dict):\n await query.answer()\n await db.update_user(query.from_user.id,\n locale=callback_data['user_locale'])\n from core.strings.scripts import i18n\n i18n.ctx_locale.set(callback_data['user_locale'])\n\n await bot.send_message(query.from_user.id, _(\"language is set\"))\n await state.finish()\n\n\n@dp.message_handler(commands='on', state='*')\nasync def on_cleaning_reminder(msg: types.Message):\n await msg.answer(_(\"choose_campus\"),\n reply_markup=markups.inline.campus_numbers)\n await SetCleaningReminderStates.enter_campus_number.set()\n\n\n@dp.callback_query_handler(markups.callbacks.choose_campus_number.filter(),\n state=SetCleaningReminderStates.enter_campus_number)\nasync def set_campus_number_cb_handler(query: types.CallbackQuery,\n state: FSMContext,\n callback_data: Dict[str, str]):\n await query.answer()\n await query.message.delete()\n async with state.proxy() as proxy:\n proxy['campus_number_set_reminder'] = callback_data['number']\n\n inline_timepicker.init(\n base_time=datetime.time(12, 0),\n min_time=datetime.time(0, 15),\n max_time=datetime.time(23, 45),\n )\n\n await bot.send_message(query.from_user.id,\n _(\"choose_cleaning_reminder_time\"),\n reply_markup=inline_timepicker.get_keyboard())\n await SetCleaningReminderStates.enter_time.set()\n\n\nasync def personal_reminder_about_cleaning(chat_id, campus_number):\n from core.strings.scripts import i18n\n try:\n await bot.send_message(chat_id, _(\"personal_reminder_cleaning, formats: number\",\n locale=await i18n.get_user_locale(None, None, user_id=chat_id))\n .format(number=campus_number))\n except TelegramAPIError as e:\n msg_text = _(\"personal_reminder_cleaning, formats: number\", locale=await i18n.get_user_locale(None, None, user_id=chat_id)).format(number=campus_number)\n logger.exception(f\"TelegramAPIError while sending reminder({chat_id}, {campus_number})\"\n f\"message={msg_text}, locale={await i18n.get_user_locale(None, None, user_id=chat_id)}\"\n f\": {e}\")\n await bot.send_message(chat_id, f\"Сегодня уборка в кампусе {campus_number}\")\n\n\ndef set_cleaning_reminder(chat_id: int, campus_number: int, time: datetime.time):\n if not isinstance(campus_number, int):\n campus_number = int(campus_number)\n for i in range(0, 4):\n base_data = 
consts.base_dates_campus_cleaning[campus_number][i]\n if base_data:\n run_time = datetime.datetime(year=base_data.year,\n month=base_data.month,\n day=base_data.day,\n hour=time.hour,\n minute=time.minute)\n\n scheduler.add_job(\n personal_reminder_about_cleaning, \"interval\",\n weeks=4, args=[chat_id, campus_number], next_run_time=run_time,\n id=consts.job_id_format.format(\n chat_id=chat_id, campus_number=campus_number, index=i\n ), replace_existing=True\n )\n\n\n@dp.callback_query_handler(inline_timepicker.filter(),\n state=SetCleaningReminderStates.enter_time)\nasync def set_cleaning_reminder_time(query: types.CallbackQuery,\n state: FSMContext,\n callback_data: Dict[str, str]):\n await query.answer()\n reminder_time = inline_timepicker.handle(query.from_user.id, callback_data)\n if reminder_time:\n await bot.edit_message_text(\n _(\"cleaning_reminder_set\"),\n chat_id=query.from_user.id,\n message_id=query.message.message_id\n )\n\n async with state.proxy() as proxy:\n loop.run_in_executor(None,\n set_cleaning_reminder,\n query.from_user.id,\n proxy['campus_number_set_reminder'],\n reminder_time\n )\n await state.finish()\n else:\n await bot.edit_message_reply_markup(\n query.from_user.id,\n message_id=query.message.message_id,\n reply_markup=inline_timepicker.get_keyboard()\n )\n\n\n@dp.message_handler(commands='off', state='*')\nasync def off_cleaning_reminder_command_handler(msg: types.Message):\n from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n campus_set = set()\n for campus in range(1, 5):\n for ind in range(0, 4):\n if scheduler.get_job(consts.job_id_format.format(\n chat_id=msg.from_user.id, campus_number=campus, index=ind\n )):\n campus_set.add(str(campus))\n\n if campus_set:\n campus_set = sorted(campus_set)\n\n kb = InlineKeyboardMarkup(row_width=2)\n kb.add(\n *list(\n InlineKeyboardButton(str(campus),\n callback_data=\n markups.callbacks.choose_campus_number.new(number=campus))\n for campus in campus_set\n )\n )\n await msg.answer(_(\"choose_campus\"), reply_markup=kb)\n await OffCleaningReminderStates.enter_campus_number.set()\n else:\n await msg.answer(_(\"no_reminders_set\"))\n\n\n@dp.callback_query_handler(markups.callbacks.choose_campus_number.filter(),\n state=OffCleaningReminderStates.enter_campus_number)\nasync def off_cleaning_reminder_cb_handler(query: types.CallbackQuery,\n state: FSMContext,\n callback_data: Dict[str, str]):\n campus = int(callback_data['number'])\n for i in range(0, 4):\n if consts.base_dates_campus_cleaning[campus][i]:\n scheduler.remove_job(job_id=consts.job_id_format.format(\n chat_id=query.from_user.id, campus_number=campus, index=i\n ))\n\n await bot.edit_message_text(_(\"reminder_is_off\"),\n chat_id=query.from_user.id,\n message_id=query.message.message_id)\n await state.finish()\n\n\n@decorators.admin\n@dp.message_handler(commands=['send_to_everyone'], state='*')\nasync def send_to_everyone_command_handler(msg: types.Message):\n await bot.send_message(msg.chat.id, _(\"mailing_everyone\"))\n await MailingEveryoneDialog.first()\n\n\n@dp.message_handler(state=MailingEveryoneDialog.enter_message)\nasync def mailing_everyone_handler(msg: types.Message, state: FSMContext):\n await bot.send_message(msg.chat.id, _(\"sent_to_everyone\"))\n scheduler.add_job(send_to_everyone, args=[msg.text])\n await state.finish()\n\n\nasync def send_to_everyone(txt):\n for u in user_model.User.objects():\n try:\n await bot.send_message(u.chat_id, txt)\n except TelegramAPIError:\n pass\n await asyncio.sleep(.5)\n\n\ndef main():\n 
logger.info(\"Compile .po and .mo before running!\")\n\n update_middleware.on_startup(dp)\n logger_middleware.on_startup(dp)\n strings.on_startup(dp) # enable i18n\n executor.start_polling(dp)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":11519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"588289024","text":"from django import forms\nfrom bootstrap_datepicker_plus import DatePickerInput, TimePickerInput\nfrom django.forms import ModelForm, Textarea\nfrom .models import UserDatasetSettings, UserDatasetFilter\nfrom django.forms.widgets import NumberInput\nfrom bootstrap_daterangepicker import widgets, fields\n\n\nclass UserDatasetSettingsStartForm(forms.ModelForm):\n class Meta:\n model = UserDatasetSettings\n fields = ['exp_type', 'name']\n labels = {\n 'name': ('Name for your dataset settings'),\n 'exp_type': ('Experiment type'),\n }\n\n\nclass UserDatasetSettingsForm(forms.ModelForm):\n class Meta:\n model = UserDatasetSettings\n fields = ['gender','exp_date1', 'exp_date2', 'age1', 'age2']\n\n labels = {\n 'exp_date1': ('Start date range'),\n 'exp_date2': ('End date range'),\n }\n\n widgets = {\n 'exp_date1': DatePickerInput().start_of('event days'),\n 'exp_date2': DatePickerInput().end_of('event days'),\n 'age1': forms.HiddenInput(),\n 'age2': forms.HiddenInput()\n }\n\n\n\n\nclass DatasetSettingsCategoriesForm(forms.ModelForm):\n class Meta:\n model = UserDatasetSettings\n fields = ['words_cat', 'words_list']\n\n\n\nclass DatasetSettingsEventsForm(forms.ModelForm):\n class Meta:\n model = UserDatasetSettings\n fields = ['event_tmin', 'event_tmax', 'channels_list', 'baseline_tmin',\n 'baseline_tmax', 'heartbeat_artifacts', 'ocular_artifacts']\n\n labels = {\n 'heartbeat_artifacts': 'Heartbeat Artifacts',\n 'ocular_artifacts': 'Ocular Artifacts'\n }\n\n widgets = {\n 'event_tmin': forms.HiddenInput(),\n 'event_tmax': forms.HiddenInput(),\n 'channels_list': forms.HiddenInput(),\n 'baseline_tmin': forms.HiddenInput(),\n 'baseline_tmax': forms.HiddenInput(),\n }\n\nclass DatasetSettingsFilterForm(forms.ModelForm):\n class Meta:\n model = UserDatasetFilter\n fields = ['order', 'low_freq', 'high_freq', 'method']\n\n labels = {\n 'low_freq': ('Low Frequency'),\n 'high_freq': ('High Frequency'),\n }\n\nclass DatasetSettingsConfirmForm(forms.ModelForm):\n class Meta:\n model = UserDatasetSettings\n fields = ['download_file_type']\n\n\n\n\n","sub_path":"create_dataset/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"15785798","text":"\nimport numpy as np\nfrom scipy import misc\n\ndef put_images_on_grid(images, shape=(16,8)):\n nrof_images = images.shape[0]\n img_size = images.shape[1]\n bw = 3\n img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)\n for i in range(shape[1]):\n x_start = i*(img_size+bw)+bw\n for j in range(shape[0]):\n img_index = i*shape[0]+j\n if img_index>=nrof_images:\n break\n y_start = j*(img_size+bw)+bw\n img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]\n if img_index>=nrof_images:\n break\n return img\n\n\ndef flip(image, random_flip):\n if random_flip and np.random.choice([True, False]):\n image = np.fliplr(image)\n return image\n\n\ndef gray2rgb(img):\n w, h = img.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 
0] = ret[:, :, 1] = ret[:, :, 2] = img\n return ret\n\n\ndef prewhiten(x):\n mean = np.mean(x)\n std = np.std(x)\n std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))\n y = np.multiply(np.subtract(x, mean), 1 / std_adj)\n return y\n\n\ndef crop(image, random_crop, image_size):\n \"\"\"\n :param image:\n :param random_crop:\n :param image_size:\n :return: crop image from center rect or random\n \"\"\"\n if image.shape[1] > image_size:\n sz1 = int(image.shape[1] // 2)\n sz2 = int(image_size // 2)\n if random_crop:\n diff = sz1 - sz2\n (h, v) = (np.random.randint(-diff, diff + 1), np.random.randint(-diff, diff + 1))\n else:\n (h, v) = (0, 0)\n image = image[(sz1 - sz2 + v):(sz1 + sz2 + v), (sz1 - sz2 + h):(sz1 + sz2 + h), :]\n return image\n\n\ndef load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):\n \"\"\"\n load image data to memory\n :param image_paths:\n :param do_random_crop:\n :param do_random_flip:\n :param image_size:\n :param do_prewhiten:\n :return:\n \"\"\"\n nrof_samples = len(image_paths)\n images = np.zeros((nrof_samples, image_size, image_size, 3))\n for i in range(nrof_samples):\n img = misc.imread(image_paths[i])\n if img.ndim == 2:\n img = gray2rgb(img)\n if do_prewhiten:\n img = prewhiten(img)\n img = crop(img, do_random_crop, image_size)\n img = flip(img, do_random_flip)\n images[i, :, :, :] = img\n return images","sub_path":"dvalib/dataset/imageutl.py","file_name":"imageutl.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"2004270","text":"import datetime\nfrom market.wallet import Wallet\nfrom market.bag import AppleBag\nclass Person:\n def __init__(self, name, gender, birth_year):\n self.name = name\n self.gender = gender\n self.birth_year = birth_year\n\n @staticmethod\n def _get_current_year():\n current_datetime = datetime.datetime.now()\n current_year = current_datetime.year\n return current_year\n\n def get_age(self):\n return self._get_current_year() - self.birth_year\n\n\nclass Trader(Person):\n def __init__(self, money_amount, apple_amount, name, gender, birth_year):\n self.wallet = Wallet(money_amount)\n self.bag = AppleBag(apple_amount)\n super().__init__(name, gender, birth_year)\n\nif __name__ == '__main__':\n\n wallet = Wallet(300)\n bag = AppleBag(100)\n trader = Trader(300, 50, 'tom', 'male', 1992)\n print(trader.get_age())\n print(trader.wallet.get_amount())\n print(trader.name)","sub_path":"market/trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"653523066","text":"#Problem 152\r\n\"\"\"\r\nWriting 1/2 as a sum of inverse squares\r\n\r\nidea: recursive function, take it or pass to the next with check if still possible\r\n\r\nNeed to remove the cases where it's almost 1/2: should compute cs perfectly\r\n\r\n\"\"\"\r\n\r\n\r\nimport Euler\r\nimport time\r\nstart = time.time()\r\n\r\n# before Frac\r\n# 30 => 4.5s, 31: 8.73s, 32: 16.22s,... 
35: 107.4s no answer\r\n# after Frac\r\n# 30 => 18s\r\n\r\n\r\nlim = 30\r\nrevcumsum = [0]*(lim+1)\r\nrevcumsum[lim] = 1/(lim**2)\r\n\r\nfor i in reversed(range(2,lim)):\r\n revcumsum[i] = revcumsum[i+1]+1/(i**2)\r\n\r\nclass Frac:\r\n n = 0\r\n d = 1\r\n\r\n def __init__(self, num, den):\r\n self.n = num\r\n self.d = den\r\n\r\n def __add__(self, other):\r\n new_n = self.n * other.d + self.d * other.n\r\n new_d = self.d * other.d\r\n\r\n cd = Euler.gcd(new_n,new_d)\r\n\r\n return Frac(new_n//cd,new_d//cd)\r\n\r\n def __float__(self):\r\n return self.n/self.d\r\n\r\n def __str__(self):\r\n return str(self.n) + '/' + str(self.d)\r\n\r\nprint(revcumsum)\r\n# lf: list of frac for generating 1/2\r\n# cs: current sum, sum of the lf\r\n# n: the new frac (1/n) to append to the list or not\r\ndef genHalf(lf, cs, n):\r\n #print(lf)\r\n if n > lim or 1/2 - float(cs) > revcumsum[n]:\r\n return # impossible to achieve 1/2\r\n\r\n if cs.n==1 and cs.d == 2: # may be should put a tol\r\n print(cs, lf)\r\n return\r\n\r\n genHalf(lf[:], cs, n + 1)\r\n if float(cs) + 1/(n**2) <= 1/2:\r\n genHalf(lf[:]+ [1/(n**2)],cs + Frac(1,n**2), n + 1)\r\n\r\ngenHalf([],Frac(0,1),2)\r\n\r\nend = time.time()\r\nprint(end-start)","sub_path":"p152/p152_bf.py","file_name":"p152_bf.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"325234349","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index),\n path('store_admin/', views.adminIndex),\n path('orderprocess/', views.orderProcess),\n path('add_product/process/', views.addProduct),\n path('add_price/process/', views.addPrice),\n path('order_complete/', views.orderComplete)\n\n]","sub_path":"shopping/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"339523124","text":"from keras.optimizers import Optimizer\nimport keras.backend as K\nimport numpy as np\nclass Adam_accumulate(Optimizer):\n '''Adam accumulate optimizer.\n\n Default parameters follow those provided in the original paper. Wait for several mini-batch to update\n\n # Arguments\n lr: float >= 0. Learning rate.\n beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor.\n\n # References\n - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)\n '''\n def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,\n epsilon=1e-8, accum_iters=5, **kwargs):\n super(Adam_accumulate, self).__init__(**kwargs)\n self.__dict__.update(locals())\n self.iterations = K.variable(0)\n self.lr = K.variable(lr)\n self.beta_1 = K.variable(beta_1)\n self.beta_2 = K.variable(beta_2)\n self.accum_iters = K.variable(accum_iters)\n\n def get_updates(self, params, constraints, loss):\n grads = self.get_gradients(loss, params)\n self.updates = [(self.iterations, self.iterations + 1)]\n\n t = self.iterations + 1\n #print t.eval()\n lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. 
- K.pow(self.beta_1, t))\n\n        ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]\n        vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]\n        gs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]\n        self.weights = ms + vs\n\n        for p, g, m, v, gg in zip(params, grads, ms, vs, gs):\n\n            flag = K.cast(K.equal(self.iterations % self.accum_iters, 0),'float32')\n\n            gg_t = (1 - flag) * (gg + g)\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * (gg + flag * g) / self.accum_iters\n            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square((gg + flag * g) / self.accum_iters) \n            p_t = p - flag * lr_t * m_t / (K.sqrt(v_t) + self.epsilon)\n\n            self.updates.append((m, flag * m_t + (1 - flag) * m))\n            self.updates.append((v, flag * v_t + (1 - flag) * m))\n            self.updates.append((gg, gg_t))\n\n            new_p = p_t\n            # apply constraints\n            if p in constraints:\n                c = constraints[p]\n                new_p = c(new_p)\n            self.updates.append((p, new_p))\n        # print self.updates\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'epsilon': self.epsilon}\n        base_config = super(Adam_accumulate, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n","sub_path":"accadam.py","file_name":"accadam.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"265552243","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2016-2017 Anaconda, Inc.\n#\n# May be copied and distributed freely only as part of an Anaconda or\n# Miniconda installation.\n# -----------------------------------------------------------------------------\n\"\"\"Update application dialog.\"\"\"\n\n# yapf: disable\n\n# Third party imports\nfrom qtpy.QtWidgets import QHBoxLayout, QLabel, QVBoxLayout\n\n# Local imports\nfrom anaconda_navigator.config import CONF\nfrom anaconda_navigator.utils.analytics import GATracker\nfrom anaconda_navigator.widgets import (ButtonNormal, ButtonPrimary,\n                                        SpacerHorizontal, SpacerVertical)\nfrom anaconda_navigator.widgets.dialogs import DialogBase\n\n\n# yapf: enable\n\n\nclass DialogUpdateApplication(DialogBase):\n    \"\"\"Update application dialog.\"\"\"\n\n    WIDTH = 460\n\n    def __init__(self, version, config=CONF, startup=False, qa_testing=False):\n        \"\"\"\n        Update application dialog.\n\n        Parameter\n        ---------\n        version: str\n            New version of update available.\n        \"\"\"\n        super(DialogUpdateApplication, self).__init__()\n        self.tracker = GATracker()\n\n        self.label = QLabel(\n            \"There's a new version of Anaconda Navigator available. \"\n            \"We strongly recommend you to update.<br><br>\"\n            \"If you click yes, Anaconda Navigator will close and then the \"\n            \"Anaconda Navigator Updater will start.<br><br><br>\"\n            \"Do you wish to update to Anaconda Navigator {0} now?\"\n            \"<br><br>\".format(version)\n        )\n        self.button_yes = ButtonPrimary('Yes')\n        self.button_no = ButtonNormal('No, remind me later')\n        self.button_no_show = ButtonNormal(\"No, don't show again\")\n        self.config = config\n\n        if not startup:\n            self.button_no_show.setVisible(False)\n            self.button_no.setText('No')\n\n        # Widgets setup\n        self.label.setWordWrap(True)\n        self.setMinimumWidth(self.WIDTH)\n        self.setMaximumWidth(self.WIDTH)\n        self.setWindowTitle('Update Application')\n\n        # On QA testing addicon continuumcrew channel allows to test that\n        # the update checking mechanism is working with a dummy package\n        # version 1000.0.0, this disallows any installation when using that\n        # check\n        if qa_testing:\n            self.button_yes.setDisabled(True)\n            self.button_no.setDisabled(True)\n            self.button_no_show.setDisabled(True)\n\n        # Layouts\n        layout_buttons = QHBoxLayout()\n        layout_buttons.addStretch()\n        layout_buttons.addWidget(self.button_no_show)\n        layout_buttons.addWidget(SpacerHorizontal())\n        layout_buttons.addWidget(self.button_no)\n        layout_buttons.addWidget(SpacerHorizontal())\n        layout_buttons.addWidget(self.button_yes)\n\n        layout = QVBoxLayout()\n        layout.addWidget(self.label)\n        layout_buttons.addWidget(SpacerVertical())\n        layout_buttons.addWidget(SpacerVertical())\n        layout.addLayout(layout_buttons)\n        self.setLayout(layout)\n\n        # Signals\n        self.button_yes.clicked.connect(self.accept)\n        self.button_no.clicked.connect(self.reject)\n        self.button_no_show.clicked.connect(self.no_show)\n\n        self.button_yes.setFocus()\n\n    def no_show(self):\n        \"\"\"Handle not showing updates on startup.\"\"\"\n        self.config.set('main', 'hide_update_dialog', True)\n        self.reject()\n\n\n# --- Local testing\n# -----------------------------------------------------------------------------\ndef local_test():  # pragma: no cover\n    \"\"\"Run local tests.\"\"\"\n    from anaconda_navigator.utils.qthelpers import qapplication\n\n    app = qapplication(test_time=3)\n    widget = DialogUpdateApplication(version='1.5.0', startup=True)\n    widget.update_style_sheet()\n    widget.show()\n    app.exec_()\n\n\nif __name__ == '__main__':  # pragma: no cover\n    local_test()\n","sub_path":"lib/python2.7/site-packages/anaconda_navigator/widgets/dialogs/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"243489113","text":"import os\n\nif __name__ == \"__main__\":\n    datasets = {\n        \"dsprites\": {\n            \"decoder_models\": {\n                'btcvae': None, \n                'factor_VAE': None, \n                'VAE': None, \n                'betaH_VAE': None,\n                'betaB_VAE': None,\n                'InfoGAN-CR': None,\n            },\n        },\n        \"celeba\": {\n            \"decoder_models\": {\n                'btcvae': None, \n                'factor_VAE': None, \n                'VAE': None, \n                'betaH_VAE': None,\n                'betaB_VAE': None,\n                'InfoGAN-CR': None,\n                'BEGAN': None,\n                'WGAN': None,\n            },\n        },\n        \"celebahq\": {\n            \"decoder_models\": {\n                'PGAN': None, \n                'StyleGAN': None, \n            },\n        },\n    }\nsuffixes = ['l100', '2', '3', '4', '5']\nsups = ['sup', 'unsup']\nfor dataset_name, details in datasets.items():\n    print(details)\n    for decoder_model, decoder_checkpoint in details[\"decoder_models\"].items():\n        for suffix in suffixes:\n            print(decoder_model, suffix)\n            if decoder_model == 'InfoGAN-CR' and suffix == 'l100':\n                suffix = None\n            for sup in sups: \n                to_run = f\"python gen_cov.py --dataset {dataset_name} --decoder_model {decoder_model} --search_n_clusters --save_scores --plot --scores_file {dataset_name}_{sup}_var2 \"\n                if suffix is not None:\n                    to_run += f\" --suffix {suffix} \"\n                if sup == 'sup':\n                    
print('supervised')\n to_run += f' --sup '\n\n os.system(to_run)\n","sub_path":"run_gen_cov.py","file_name":"run_gen_cov.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458985467","text":"# System of non linear equations\nfrom math import log, fabs\nfrom copy import deepcopy\nimport numpy as np\n\n\ndef System(N, X):\n if N == 1:\n return -0.1 * X[1]**2 - 0.2 * X[2]**2 + 0.3\n elif N == 2:\n return -0.1 * X[1]**2 + 0.1 * X[1] * X[2] + 0.7\n\n\ndef MPI(n, m, X, eps=1e-3):\n k = 0\n while True:\n d = 0\n b = deepcopy(X)\n A = deepcopy(b)\n A[1] = System(1, X)\n X[1] = A[1]\n A[2] = System(2, X)\n X[2] = A[2]\n A = deepcopy(b)\n for i in range(1, n + 1):\n d1 = fabs(X[i] - A[i])\n if d < d1:\n d = d1\n k += 1\n if (d <= eps):\n print(\"Solution is \", X, \"\\nnumber of iteration=\")\n break\n A = deepcopy(X)\n if k > m:\n print(\"Процес розбігається!\")\n\n\nX = np.array([0., 0.25, 0.75])\nn = 2\nm = 10\nMPI(n, m, X)\n","sub_path":"nm/3/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"391789019","text":"# -*- coding:utf-8 -*-\r\n'''\r\n利用高德地图api实现经纬度与地址的批量转换\r\n'''\r\nimport requests\r\nimport pandas as pd\r\nimport time\r\nimport importlib\r\nimport sys\r\n\r\nimportlib.reload(sys)\r\n\r\n#文档读取\r\ndef parse():\r\n datas = []\r\n totalListData = pd.read_csv('locs.csv', encoding='gb2312')\r\n totalListDict = totalListData.to_dict('index')\r\n for i in range(0, len(totalListDict)):\r\n datas.append(str(totalListDict[i]['cityName']))\r\n return datas\r\n\r\n\r\ndef transform(cityName):\r\n ak = 'zijishenqing'\r\n base = \"http://restapi.amap.com/v3/geocode/geo?key=%s&address=%s&city=%s\" % (\r\n ak, cityName,cityName)\r\n response = requests.get(base)\r\n answer = response.json()\r\n if ((answer['geocodes']!= []) and (answer['geocodes'][0]['city'] != [])):\r\n return answer['geocodes'][0]['province'], cityName, answer['geocodes'][0]['location']\r\n else:\r\n return 0\r\n\r\nif __name__ == '__main__':\r\n i = 0\r\n count = 0\r\n df = pd.DataFrame(columns=['province', 'city', 'location'])\r\n cityNames = parse()\r\n for cityName in cityNames:\r\n if(transform(cityName)!=0):\r\n province, city, location = transform(cityName)\r\n df.loc[i] = [province, city, location]\r\n i = i + 1\r\n df.to_csv('locdetail4.csv', index=False)\r\n","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"560403858","text":"'''\nList of Depths:\n Create a linked list of nodes at each depth\n'''\nclass Node:\n def __init__(self, item):\n self.right = None\n self.left = None\n self.next = None\n self.val = item\n\ndef list_of_depths(root):\n\n queue = []\n queue.append(root)\n queue.append(None) # append a None to signify the end of the level\n\n while queue:\n curr = queue.pop(0)\n if curr: \n curr.next = queue[0] # if it is not none, set the next pointer to the new top of the queue\n if curr.left:\n queue.append(curr.left) # append the left\n if curr.right:\n queue.append(curr.right) # append the right\n elif queue:\n queue.append(None) # if it is None, set the new end to the level\n \n\n","sub_path":"Cracking the Coding Interview/Trees and 
Graphs/ListofDepths.py","file_name":"ListofDepths.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"103302650","text":"import numpy as np\nimport matplotlib.pyplot as plt\n \n# data to plot\nn_groups = 6\n\n#these just copied straight in from the excel sheet, didn't make sense to compute again here\nmeans_vr = (31.2, 24.8, 16.8, 16.2, 12.6, 16.9)\nmeans_nvr = (24.5, 21.9\t,14.2\t,13.7\t,12,\t16.5)\nsd_vr = (3.894440482,\t1.872490914,\t3.708099244,\t3.004626063,\t1.269295518,\t3.082207001)\nsd_nvr = (6.149977416,\t2.469817807,\t3.521363372,\t2.71006355,\t1.699673171,\t2.223610677)\n \n# create plot\nfig, ax = plt.subplots()\n\nfor i in range(0, 38):\n\tax.axhline(i, color=\"white\", zorder=-1)\nindex = np.arange(n_groups)\nbar_width = 0.15\nopacity = 0.8\n\n\n \nrects1 = plt.bar(index, means_vr, bar_width,\n alpha=opacity,\n color='#395177',\n label='HMD', yerr=sd_vr, ecolor='#5d5f60')\n \nrects2 = plt.bar(index + bar_width, means_nvr, bar_width,\n alpha=opacity,\n color='#6f6f6f',\n label='Handheld', yerr=sd_nvr, ecolor='#5d5f60')\n \n# plt.xlabel('Visual Feedback Method')\n# plt.ylabel('Scores')\nplt.title('Mean Scores by Presence Factor')\nplt.xticks(index + bar_width, ('Realism \\n S, U = 19', 'Possibility To Act \\n S, U = 18', \n\t'Quality of Interface \\n NS, U = 27.5', 'Possibility to Examine \\n S, U = 25.5', 'Self-evaluation Of \\n Performance \\n NS, U = 38' , \n\t'Sounds \\n NS, U = 41'), rotation=65, fontsize=8)\nplt.yticks(np.arange(0, 40, 5))\nplt.legend()\n\nax.patch.set_facecolor('#f6f6f6')\n \nplt.tight_layout()\nplt.show()","sub_path":"Report/log_data_and_python_scripts/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"438434673","text":"import sys\n\nfrom random import randint\nfrom PyQt5.QtGui import QPainter, QColor\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(800, 600)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(330, 490, 131, 41))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.pushButton.setFont(font)\n self.pushButton.setObjectName(\"pushButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Кнопка\"))\n\n\nclass MyWidget(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.setWindowTitle('Не жёлтый круг')\n self.do_paint = False\n self.pushButton.clicked.connect(self.paint)\n\n def paintEvent(self, event):\n if 
self.do_paint:\n qp = QPainter()\n qp.begin(self)\n self.draw_flag(qp)\n qp.end()\n\n def paint(self):\n self.do_paint = True\n self.repaint()\n\n def draw_flag(self, qp):\n qp.setBrush(QColor(randint(0, 255), randint(0, 255), randint(0, 255)))\n x, y, r = randint(0, 800), randint(0, 600), randint(1, 600)\n qp.drawEllipse(x - r // 2, y - r // 2, r, r)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyWidget()\n ex.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"89822339","text":"import csv\nfrom abc import ABC\n\nimport torch\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nimport os\nimport datetime\n\n\nclass LogTxt(object):\n ### train logger\n def __init__(self, path, description):\n if not os.path.exists(path):\n os.mkdir(path)\n self.file = os.path.join(path, 'train_log.txt')\n logger = open(self.file, 'a')\n logger.write('\\n' + description + '\\n')\n logger.close()\n\n def log_train(self, epoch, loss, optimizer):\n curr_time = datetime.datetime.now()\n curr_time = datetime.datetime.strftime(curr_time, '%Y-%m-%d %H:%M:%S')\n logger = open(self.file, 'a')\n logger.write('\\n' + '--'*40 +\n '\\n {Time}\\tEpoch: {epoch} \\tLoss: {loss:0.4f}\\tLR: {lr:0.6f}'.format(Time=curr_time, epoch=epoch, loss=loss, lr=optimizer.param_groups[0]['lr'])\n )\n logger.close()\n\n def log_val(self, acc, best_acc):\n logger = open(self.file, 'a')\n logger.write('\\n \\t \\t Validate Score : {acc}\\t Best Score : {best}'.format(acc=acc, best=best_acc))\n logger.close()\n\n\nfrom torch.optim.lr_scheduler import _LRScheduler\nclass WarmUpLR(_LRScheduler):\n \"\"\"warmup_training learning rate scheduler\n Args:\n optimizer: optimzier(e.g. 
SGD)\n total_iters: totoal_iters of warmup phase\n \"\"\"\n\n def __init__(self, optimizer, total_iters, last_epoch=-1):\n self.total_iters = total_iters\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self):\n \"\"\"we will use the first m batches, and set the learning\n rate to base_lr * m / total_iters\n \"\"\"\n return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]\n\n\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\nclass FrameLoss(nn.Module):\n def __init__(self):\n super(FrameLoss, self).__init__()\n self.loss = nn.MSELoss(reduction='sum')\n\n def forward(self, input, label):\n target = torch.zeros_like(input)\n N, T = input.size()\n for i in range(N):\n position = label[i].item() - 1\n floor, ceil = math.floor(position), math.ceil(position)\n target[i][floor] = 1\n target[i][ceil] = 1\n if floor >= 1:\n target[i][floor - 1] = 0.5\n if ceil < T - 1:\n target[i][ceil + 1] = 0.5\n if input.is_cuda:\n target = target.cuda()\n return self.loss(input, target)\n\n\nclass Loss_v2(nn.Module):\n def __init__(self):\n super(Loss_v2, self).__init__()\n\n def forward(self, input, label):\n \"\"\"\n :param input: 模型的输出结果\n :param label: 标签位置比例, 0~1\n :return: 标签转换为热图平滑标签后,计算交叉熵\n \"\"\"\n target = torch.zeros_like(input)\n N, T = input.size()\n for i in range(N):\n position = label[i].item() * T - 1\n floor, ceil = math.floor(position), math.ceil(position)\n # 热图平滑标签\n if floor >= 1:\n target[i][floor - 1] = 0.1\n target[i][floor] = 0.4\n else:\n target[i][floor] = 0.5\n if ceil < T - 1:\n target[i][ceil + 1] = 0.1\n target[i][ceil] = 0.4\n else:\n target[i][ceil] = 0.5\n\n if input.is_cuda:\n target = target.cuda()\n\n log_likelihood = - torch.log_softmax(input, dim=1)\n\n return torch.sum(torch.mul(log_likelihood, target), dim=1).mean()\n\n\nif __name__ == '__main__':\n loss = nn.MSELoss(reduction='sum')\n input = torch.randn(4, 210, requires_grad=True)\n target = torch.zeros_like(input)\n output = loss(input, target)\n print(output)\n","sub_path":"multiModal_AV/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178188412","text":"import numpy as np\n\n\ndef find_y(point_1: list, point_2: list, x: float) -> float:\n return -(((point_1[1] - point_2[1]) * x + (point_1[0] * point_2[1] - point_2[0] * point_1[1])) /\n (point_2[0] - point_1[0]))\n\n\ndef reverse_triangle_points(points: list) -> list:\n \"\"\"Format triangle points counterclockwise\"\"\"\n\n point_1 = points[0]\n point_2 = points[1]\n point_3 = points[2]\n\n if point_1[0] < point_2[0]:\n if find_y(point_1, point_2, point_3[0]) > point_3[1]:\n return list(reversed(points))\n else:\n return points\n elif point_1[0] > point_2[0]:\n if find_y(point_1, point_2, point_3[0]) > point_3[1]:\n return points\n else:\n return list(reversed(points))\n else:\n if point_1[1] > point_2[1]:\n if point_3[0] > point_1[0]:\n return points\n else:\n return list(reversed(points))\n else:\n if point_3[0] < point_1[0]:\n return points\n else:\n return list(reversed(points))\n\n\ndef contains_number(number: int, arr: list) -> bool:\n for i in arr:\n if number == i:\n return True\n\n return False\n\n\ndef find_triangle_points(triangle_number: int, triangles, all_points: list) -> list:\n points_numbers = []\n triangle_points = []\n\n for edge in triangles:\n if edge[0] == triangle_number:\n points_numbers.append(edge[1])\n 
points_numbers.append(edge[2])\n points_numbers.append(edge[3])\n\n for point in all_points:\n if contains_number(point[0], points_numbers):\n triangle_points.append([point[1], point[2]])\n\n return list(reverse_triangle_points(triangle_points))\n\n\ndef matrix_normalization(k):\n result = np.array(([k.item(0), k.item(1), k.item(2)],\n [k.item(3), k.item(4), k.item(5)],\n [k.item(6), k.item(7), k.item(8)]))\n # print(f\"normalization: {result}\")\n return result\n\n\ndef find_numbers(str):\n point_number = 0\n point_coord_x = 0\n point_coord_y = 0\n i = 0\n while str[i] == '' or str[i] == ' ':\n i+=1\n\n temp_str = ''\n while str[i] != ' ':\n temp_str += str[i]\n i += 1\n point_number = int(temp_str)\n\n temp_str = ''\n i += 4\n while str[i] != ' ':\n temp_str += str[i]\n i += 1\n point_coord_x = float(temp_str)\n\n i += 2\n temp_str = ''\n while str[i] != ' ':\n temp_str += str[i]\n i += 1\n point_coord_y = float(temp_str)\n return point_number, point_coord_x, point_coord_y\n # print(f'point {point_number}: {point_coord_x} {point_coord_y}')\n\n\ndef get_point_by_coords(coords):\n points = []\n temp_arr = []\n file = open('Model/_triangle.1.node', 'r')\n for line in file:\n temp_arr.append(line)\n temp_arr.pop(0)\n temp_arr.pop(-1)\n for i in temp_arr:\n point_number, point_coord_x, point_coord_y = find_numbers(i)\n points.append([point_number, point_coord_x, point_coord_y])\n # print(temp_arr)\n # print(points)\n point = 0\n for i in points:\n if i[1] == coords[0] and i[2] == coords[1]:\n point = i[0]\n # print(point)\n return point\n\n\ndef remove_repeated_lists(arr: list):\n new_arr = []\n for i in arr:\n if i not in new_arr:\n new_arr.append(i)\n return new_arr\n\n\nremove_repeated_lists([[1, 2], [1, 2], [1, 1]])\n\n# get_point_by_coords([0.5, 1.0])\n# find_numbers(' 20 0.6728515625 0.4228515625 0')\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"328678512","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPlot mean/std per point.\n\"\"\"\n\nimport argparse\nimport json\nimport os\n\nimport numpy as np\n\nfrom scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,\n assert_output_dirs_exist_and_empty)\nfrom scilpy.utils.metrics_tools import plot_metrics_stats\n\n\ndef _build_arg_parser():\n p = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n p.add_argument('in_json',\n help='JSON file containing the mean/std per point. For '\n 'example, can be created using '\n 'scil_compute_metrics_along_streamline.')\n p.add_argument('out_dir',\n help='Output directory.')\n\n p.add_argument('--fill_color',\n help='Hexadecimal RGB color filling the region between '\n 'mean ± std. 
The hexadecimal RGB color should be '\n 'formatted as 0xRRGGBB.')\n\n add_overwrite_arg(p)\n return p\n\n\ndef main():\n parser = _build_arg_parser()\n args = parser.parse_args()\n\n assert_inputs_exist(parser, args.in_json)\n assert_output_dirs_exist_and_empty(parser, args, args.out_dir,\n create_dir=True)\n\n if args.fill_color and len(args.fill_color) != 8:\n parser.error('Hexadecimal RGB color should be formatted as 0xRRGGBB')\n\n with open(args.in_json, 'r+') as f:\n mean_std_per_point = json.load(f)\n\n for bundle_name, bundle_stats in mean_std_per_point.items():\n for metric, metric_stats in bundle_stats.items():\n nb_points = len(metric_stats)\n num_digits_labels = len(str(nb_points))\n means = []\n stds = []\n for label_int in range(1, nb_points+1):\n label = str(label_int).zfill(num_digits_labels)\n mean = metric_stats.get(label, {'mean': np.nan})['mean']\n mean = mean if mean else np.nan\n std = metric_stats.get(label, {'std': np.nan})['std']\n std = std if std else np.nan\n means += [mean]\n stds += [std]\n\n fig = plot_metrics_stats(\n np.array(means), np.array(stds),\n title=bundle_name,\n xlabel='Location along the streamline',\n ylabel=metric,\n fill_color=(args.fill_color.replace(\"0x\", \"#\")\n if args.fill_color else None))\n fig.savefig(\n os.path.join(args.out_dir, '{}_{}.png'.format(bundle_name,\n metric)),\n bbox_inches='tight')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/scil_plot_mean_std_per_point.py","file_name":"scil_plot_mean_std_per_point.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"436359240","text":"import tensorflow as tf\n\nfrom ssd.common.box_utils import convert_to_xywh, rescale_boxes, absolute_to_relative, swap_xy, relative_to_absolute\nfrom ssd.common.label_encoder import LabelEncoder\n\n_policy = tf.keras.mixed_precision.experimental.global_policy()\n\n\nclass DatasetBuilder:\n\n def __init__(self, split, config):\n self._dataset = None\n self._split = split\n self._backbone = config['backbone']\n self._label_encoder = LabelEncoder(config)\n self._input_height = config['image_height']\n self._input_width = config['image_width']\n self._batch_size = config['batch_size']\n self._tfrecords = tf.data.Dataset.list_files(config['tfrecords_' + split])\n self._augment_val_dataset = config['augment_val_dataset']\n self._random_brightness = config['random_brightness']\n self._random_contrast = config['random_contrast']\n self._random_saturation = config['random_saturation']\n self._random_flip_horizonal = config['random_flip_horizonal']\n self._random_patch = config['random_patch']\n self._brightness_max_delta = config['brightness_max_delta']\n self._contrast_lower = config['contrast_lower']\n self._contrast_upper = config['contrast_upper']\n self._saturation_lower = config['saturation_lower']\n self._saturation_upper = config['saturation_upper']\n self._min_obj_covered = config['min_obj_covered']\n self._area_range = config['area_range']\n self._aspect_ratio_range = config['aspect_ratio_range']\n self._cache_dataset_in_memory = config['cache_dataset_in_memory']\n self._build_tfrecord_dataset()\n\n def _random_flip_horizontal_fn(self, image, boxes):\n w = tf.cast(tf.shape(image)[1], dtype=_policy.compute_dtype)\n if tf.random.uniform(()) > 0.5:\n image = tf.image.flip_left_right(image)\n boxes = tf.stack(\n [w - boxes[:, 2], boxes[:, 1], w - boxes[:, 0], boxes[:, 3]],\n axis=-1)\n return image, boxes\n\n def _random_brightness_fn(self, 
image):\n image = tf.image.random_brightness(image, self._brightness_max_delta)\n return tf.clip_by_value(image, 0.0, 1)\n\n def _random_contrast_fn(self, image):\n image = tf.image.random_contrast(image, self._contrast_lower, self._contrast_upper)\n return tf.clip_by_value(image, 0.0, 1)\n\n def _random_saturation_fn(self, image):\n image = tf.image.random_contrast(image, self._saturation_lower, self._saturation_upper)\n return tf.clip_by_value(image, 0.0, 1)\n\n def _filter_and_adjust_labels(self, crop_box, boxes, classes):\n boxes = tf.cast(boxes, dtype=_policy.compute_dtype)\n crop_box = tf.cast(crop_box, dtype=_policy.compute_dtype)\n\n offsets = tf.concat([\n crop_box[:, :2] - boxes[:, 2:],\n boxes[:, :2] - crop_box[:, 2:],\n ], axis=-1)\n\n crop_box_width = crop_box[:, 2] - crop_box[:, 0]\n crop_box_height = crop_box[:, 3] - crop_box[:, 1]\n\n adjusted_boxes = tf.stack([\n tf.clip_by_value(boxes[:, 0] - crop_box[:, 0], 0, crop_box_width),\n tf.clip_by_value(boxes[:, 1] - crop_box[:, 1], 0, crop_box_height),\n tf.clip_by_value(boxes[:, 2] - crop_box[:, 0], 0, crop_box_width),\n tf.clip_by_value(boxes[:, 3] - crop_box[:, 1], 0, crop_box_height)\n ], axis=-1)\n\n idx = tf.where(tf.logical_not(tf.reduce_all(offsets >= 0, axis=-1)))[:, 0]\n return tf.gather(adjusted_boxes, idx), tf.gather(classes, idx)\n\n def _random_patch_fn(self, image, boxes, classes):\n boxes = absolute_to_relative(boxes, tf.shape(image))\n\n start, size, crop_box = tf.python.image.sample_distorted_bounding_box_v2(\n image_size=tf.shape(image),\n bounding_boxes=tf.expand_dims(swap_xy(boxes), axis=0),\n min_object_covered=self._min_obj_covered,\n area_range=self._area_range,\n aspect_ratio_range=self._aspect_ratio_range)\n\n crop_box = relative_to_absolute(swap_xy(crop_box[0]), tf.shape(image))\n\n cropped_image = tf.slice(image, start, size)\n cropped_image.set_shape([None, None, 3])\n\n boxes = relative_to_absolute(boxes, tf.shape(image))\n adjusted_boxes, adjusted_classes = self._filter_and_adjust_labels(crop_box, boxes, classes)\n\n return cropped_image, adjusted_boxes, adjusted_classes\n\n def _augment_data(self, image, boxes, classes):\n if self._split == 'val' and not self._augment_val_dataset:\n return image, boxes, classes\n image = image / 255.0\n if self._random_patch:\n image, boxes, classes = self._random_patch_fn(image, boxes, classes)\n if self._random_flip_horizonal:\n image, boxes = self._random_flip_horizontal_fn(image, boxes)\n if self._random_brightness:\n image = self._random_brightness_fn(image)\n if self._random_contrast:\n image = self._random_contrast_fn(image)\n if self._random_saturation:\n image = self._random_saturation_fn(image)\n image = image * 255.0\n return image, boxes, classes\n\n def _parse_example(self, example_proto):\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'xmins': tf.io.VarLenFeature(tf.float32),\n 'ymins': tf.io.VarLenFeature(tf.float32),\n 'xmaxs': tf.io.VarLenFeature(tf.float32),\n 'ymaxs': tf.io.VarLenFeature(tf.float32),\n 'classes': tf.io.VarLenFeature(tf.int64),\n }\n\n parsed_example = tf.io.parse_single_example(example_proto,\n feature_description)\n classes = tf.sparse.to_dense(parsed_example['classes'])\n\n image = tf.io.decode_image(parsed_example['image'], channels=3)\n image = tf.cast(image, dtype=_policy.compute_dtype)\n image.set_shape([None, None, 3])\n\n boxes = tf.stack([\n tf.sparse.to_dense(parsed_example['xmins']),\n tf.sparse.to_dense(parsed_example['ymins']),\n tf.sparse.to_dense(parsed_example['xmaxs']),\n 
tf.sparse.to_dense(parsed_example['ymaxs']),\n ], axis=-1)\n boxes = tf.cast(boxes, dtype=_policy.compute_dtype)\n return image, boxes, classes\n\n def _parse_and_create_label(self, example_proto):\n image, boxes, classes = self._parse_example(example_proto)\n original_dims = tf.cast(tf.shape(image), dtype=_policy.compute_dtype)\n boxes = tf.stack([\n tf.clip_by_value(boxes[..., 0], 0, original_dims[1]),\n tf.clip_by_value(boxes[..., 1], 0, original_dims[0]),\n tf.clip_by_value(boxes[..., 2], 0, original_dims[1]),\n tf.clip_by_value(boxes[..., 3], 0, original_dims[0])\n ], axis=-1)\n\n image, boxes, classes = self._augment_data(image, boxes, classes)\n new_dims = tf.shape(image)\n image = tf.image.resize(image,\n size=[self._input_height, self._input_width])\n boxes = rescale_boxes(boxes,\n [new_dims[0], new_dims[1]],\n [self._input_height, self._input_width])\n\n if 'resnet' in self._backbone:\n image = (image - 127.5) / 127.5\n\n boxes_xywh = convert_to_xywh(boxes)\n label = self._label_encoder.encode_sample(boxes_xywh, classes)\n return image, label\n\n def _build_tfrecord_dataset(self):\n dataset = self._tfrecords.interleave(\n tf.data.TFRecordDataset,\n cycle_length=8,\n block_length=32,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if self._cache_dataset_in_memory:\n dataset = dataset.cache()\n dataset = dataset.shuffle(512)\n dataset = dataset.map(self._parse_and_create_label,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(self._batch_size, drop_remainder=True)\n dataset = dataset.repeat()\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n self._dataset = dataset\n\n @property\n def dataset(self):\n return self._dataset\n","sub_path":"ssd/data/dataset_builder.py","file_name":"dataset_builder.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"155947021","text":"# coding: utf-8\n# Copyright (c) JUMP2 Development Team.\n# Distributed under the terms of the JLU License.\n\n\n#=================================================================\n# This file is part of JUMP2.\n#\n# Copyright (C) 2017 Jilin University\n#\n# Jump2 is a platform for high throughput calculation. It aims to \n# make simple to organize and run large numbers of tasks on the \n# superclusters and post-process the calculated results.\n# \n# Jump2 is a useful packages integrated the interfaces for ab initio \n# programs, such as, VASP, Guassian, QE, Abinit and \n# comprehensive workflows for automatically calculating by using \n# simple parameters. Lots of methods to organize the structures \n# for high throughput calculation are provided, such as alloy,\n# heterostructures, etc.The large number of data are appended in\n# the MySQL databases for further analysis by using machine \n# learning.\n#\n# Jump2 is free software. You can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published \n# by the Free sofware Foundation, either version 3 of the License,\n# or (at your option) and later version.\n# \n# You should have recieved a copy of the GNU General Pulbic Lincense\n# along with Jump2. 
If not, see .\n#=================================================================\n\n\"\"\"\nClasses for read structures.\n\"\"\"\n\n__all__=['convert_value','parse_multiline_string','parse_singletag',\n'parse_loop','parse_items','parse_block','parse_cif','format_symbol',\n'equival_pos','numbers_cal','lattice_vector','SpacegroupError',\n'SpacegroupNotFoundError','SpacegroupValueError']\n\nfrom sys import exit\nfrom cif import *\nclass ReadError(Exception):\n pass\n \nclass Read(object):\n \"\"\"\n reading structure\n \n arguments:\n file: path of structure. i.e. /home/xx/xx/POSCAR, POSCAR\n type: type of structure file. i.e. crystal: cif, poscar; molecule: xyz, mol....\n \n \"\"\"\n \n def __init__(self, file, type=None):\n self.file=file\n \n if type == None:\n if self.file.endswith('.cif'):\n self.type='cif'\n elif self.file.endswith('.xyz'):\n self.type='xyz'\n elif self.file.endswith('.mol'):\n self.type='mol'\n elif self.file.endswith('.vasp'):\n self.type = 'poscar'\n elif any(key in self.file.split('/')[-1]\n for key in ['POSCAR' or 'CONTCAR']):\n self.type='poscar'\n else:\n raise ReadError('please specify the type of file')\n elif type == 'cif':\n self.type='cif'\n elif type.lower() == 'poscar':\n self.type='poscar'\n elif type.lower() == 'xyz':\n self.type='xyz'\n elif type.lower() == 'mol':\n self.type='mol'\n else:\n raise ReadError('unknown type of file!')\n \n \n def getStructure(self):\n \"\"\"\n read structure\n \n returns:\n json's object of a structure\n \n \"\"\"\n if self.type == 'cif':\n return self.__readCIF()\n elif self.type == 'poscar':\n return self.__readPOSCAR()\n elif self.type == 'xyz':\n return self.__readXYZ()\n elif self.type == 'mol':\n return self.__readMOL()\n \n def __readCIF(self):\n \"\"\"\n read CIF file\n\n returns:\n cif: A dictionary including:\n lattice=[[x1,y1,z1],\n [x2,y2,z2],\n [x3,y3,z3]]\n elements=['Ca', 'Fe', 'Sb']\n numbers=[2, 8, 24]\n type= Direct\n positions=[[a1_x,a1_y,a1_z],\n [a2_x,a2_y,a2_z],\n [a3_x,a3_y,a3_z],\n ...]\n\n \"\"\"\n import numpy as np\n import math\n from spaceGroupD3 import spacegroups as SG\n cf=parse_cif(self.file)\n cb=cf[0][1]\n\n # lattice parameters\n aa=float(cb['_cell_length_a'])\n bb=float(cb['_cell_length_b'])\n cc=float(cb['_cell_length_c'])\n alpha=float(cb['_cell_angle_alpha'])\n beta=float(cb['_cell_angle_beta'])\n gamma=float(cb['_cell_angle_gamma'])\n alpha=alpha*(math.pi/180)\n beta=beta*(math.pi/180)\n gamma=gamma*(math.pi/180)\n\n # lattice vector\n lattice=[]\n lattice=lattice_vector(aa, bb, cc, alpha, beta, gamma)\n\n # elements\n elements=[]\n elements=cb['_atom_site_type_symbol']\n\n # space group number\n group_number=None\n if '_space_group.it_number' in cb:\n group_number=str(cb['_space_group.it_number'])\n elif '_space_group_it_number' in cb:\n group_number=str(cb['_space_group_it_number'])\n elif '_symmetry_int_tables_number' in cb:\n group_number=str(cb['_symmetry_int_tables_number'])\n\n # space group H-M symbol\n symbolHM=None\n if '_space_group.Patterson_name_h-m' in cb:\n symbolHM=format_symbol(cb['_space_group.patterson_name_h-m'])\n elif '_symmetry_space_group_name_h-m' in cb:\n symbolHM=format_symbol(cb['_symmetry_space_group_name_h-m'])\n\n # symmetry operations\n for name in ['_space_group_symop_operation_xyz',\n '_space_group_symop.operation_xyz',\n '_symmetry_equiv_pos_as_xyz']:\n if name in cb:\n sitesym=cb[name]\n break\n else:\n sitesym=None\n\n # positions\n positions=[]\n if sitesym:\n positions=equival_pos(sitesym, cb)\n elif symbolHM:\n if SG.get(symbolHM):\n 
positions=equival_pos(SG.get(symbolHM), cb)\n else:\n raise SpacegroupNotFoundError('invalid spacegroup %s, not found in data base' %\n (symbolHM,))\n elif group_number:\n positions=equival_pos(SG.get(group_number), cb)\n else:\n raise SpacegroupValueError('either *number* or *symbol* must be given for space group!')\n\n # numbers\n numbers=[]\n if '_atom_site_symmetry_multiplicity' in cb:\n numbers=cb['_atom_site_symmetry_multiplicity']\n elif sitesym:\n numbers=numbers_cal(sitesym, cb)\n elif symbolHM:\n numbers=numbers_cal(SG.get(symbolHM), cb)\n else:\n numbers=numbers_cal(SG.get(group_number), cb)\n\n # type\n type='Direct'\n\n lattice=np.array(lattice)\n elements=np.array(elements)\n numbers=np.array(numbers)\n positions=np.array(positions)\n\n cif={'lattice': lattice,\n 'elements': elements,\n 'numbers': numbers,\n 'type': type,\n 'positions': positions}\n\n return cif \n \n def __readPOSCAR(self): # only for VASP5.x (It means the file need to contain the element information)\n \"\"\"\n read POSCAR file\n \n poscar:\n comment: comment of the first line\n lattice=[[x1,y1,z1],\n [x2,y2,z2],\n [x3,y3,z3]]\n elements=['Ca', 'Fe', 'Sb']\n numbers=[2, 8, 24]\n type= Direct or Cartesian\n positions=[[a1_x,a1_y,a1_z],\n [a2_x,a2_y,a2_z],\n [a3_x,a3_y,a3_z],\n ...]\n constraints=[[T,T,T], # Selective dynamics (optional)\n [F,F,F],\n [T,F,T],\n ...]\n \n returns:\n json's object of a structure\n \n \"\"\"\n import numpy as np\n poscar=()\n input=open(self.file)\n \n # comment\n comment=''\n string=input.readline()\n if string != \"\":\n comment=string.split('\\n')[0]\n \n scale=float(input.readline())\n \n # lattice\n # ensure all structure's scale equal 1 inside the program \n lattice=[]\n for i in range(0,3):\n try:\n tmp=np.array([float(s0) for s0 in input.readline().split()])\n if tmp.shape[0] == 3:\n lattice.append(tmp*scale)\n else:\n print('lattice parameter is less than 3!')\n exit()\n except ValueError:\n print(\"can't transfer literal to float type!\")\n exit()\n lattice=np.array(lattice)\n \n # element VASP5.x\n # Note that:\n # need check symbol of element is valid by comparing the element table in jump2db\n elements=[]\n tmp=np.array(input.readline().split())\n for i in range(0,tmp.shape[0]):\n if not(tmp[i].isalpha()):\n print('elements contain non-alphabet!')\n exit()\n elements=tmp\n \n # numbers\n numbers=[]\n try:\n tmp=np.array([int(s0) for s0 in input.readline().split()])\n if elements.shape[0] != tmp.shape[0]:\n print(\"length of numbers don't match with that of elements\")\n exit()\n numbers=tmp\n except ValueError:\n print(\"can't transfer literal to int type!\")\n exit()\n \n \n tmp=input.readline()\n isConstraint=False\n type=''\n if tmp.lower().startswith('s'): # Selective dynamics\n isConstraint=True\n # type\n tmp=input.readline()\n if tmp.lower().startswith('c'):\n type='Cartesian'\n elif tmp.lower().startswith('d'):\n type='Direct'\n else:\n print('type of POSCAR is invalid')\n exit()\n # type \n elif tmp.lower().startswith('c'):\n type='Cartesian'\n elif tmp.lower().startswith('d'):\n type='Direct'\n else:\n print('type of POSCAR is invalid')\n exit()\n \n # position\n natoms=sum(numbers)\n positions=[]\n constraints=[]\n for i in range(0, natoms):\n try:\n string=input.readline().split()\n if (not isConstraint and len(string) <3) or (isConstraint and len(string) != 6):\n print('column of position not enough!')\n exit()\n tmp=np.array([float(s0) for s0 in string[:3]])\n positions.append(tmp)\n \n # constraint\n if isConstraint:\n tmp=np.array([False if 
s0.startswith('F') else True for s0 in string[3:6]])\n constraints.append(tmp)\n \n except ValueError:\n print(\"can't transfer literal to float type!\")\n exit()\n positions=np.array(positions)\n constraints=np.array((constraints))\n \n input.close()\n #poscar=(comment,lattice,elements,numbers,type,positions,constraints)\n poscar={'comment':comment,\n 'lattice':lattice,\n 'elements':elements,\n 'numbers':numbers,\n 'type':type,\n 'positions':positions,\n 'constraints':constraints}\n return poscar\n\n def __readXYZ(self):\n \"\"\"\n read xyz file\n \n poscar:\n elements=['Ca', 'Fe', 'Sb']\n numbers=[2, 8, 24]\n positions=[[a1_x,a1_y,a1_z],\n [a2_x,a2_y,a2_z],\n [a3_x,a3_y,a3_z],\n ...]\n Note: coordinate type of positions can only be Cartesian.\n \n returns:\n object of a structure\n \"\"\"\n import numpy as np\n xyz=()\n input=open(self.file)\n \n # natoms\n try:\n natoms=int(input.readline())\n except ValueError:\n return ValueError('invalid natoms in xyz file!')\n \n # comment\n comment=input.readline() # skip\n \n # atoms\n counter=0 # counter of atoms\n atoms={}\n string=input.readline()\n while(string):\n if string.split() != []: # skip blank line\n ntmp=string.split()[0] # atomic name\n try:\n ptmp=np.array([float(s0) for s0 in string.split()[1:]]) # atomic position\n except ValueError:\n raise ValueError('invalid atomic position in xyz file!')\n \n if ntmp in atoms.keys():\n value=atoms[ntmp]\n atoms[ntmp]=np.vstack((value,ptmp))\n else:\n atoms[ntmp]=ptmp\n counter=counter+1\n string=input.readline()\n \n if counter != natoms:\n raise ReadError(\"number of atoms doesn't match!\")\n \n # conversion format\n molecule={}\n molecule['elements']=np.array(atoms.keys())\n numbers=[]\n positions=[]\n for e in atoms.keys():\n dim=atoms[e].shape\n if len(dim) == 1 and dim[0] == 3:\n numbers.append(1)\n positions.append(atoms[e])\n elif len(dim) == 2 and dim[1] == 3:\n numbers.append(dim[0])\n for p in range(0,dim[0]):\n positions.append(atoms[e][p])\n else:\n raise ReadError('invalid atomic position!')\n \n molecule['numbers']=np.array(numbers)\n molecule['positions']=np.array(positions)\n \n return molecule\n \n def __readMOL(self):\n \"\"\"\n \"\"\"\n pass\n","sub_path":"boo/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"541433638","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nScript Header\n\n$Id: cmCC25518_3pcc_BS_IOT_Interop_128_FKSDeactivateCfwdNoAnswerFromPhone\n\nCopyright (c) 2016 Cisco Systems, Inc.\n\nName:\n cmCC25518_3pcc_BS_IOT_Interop_128_FKSDeactivateCfwdNoAnswerFromPhone.py\n\nPurpose:\n From the phone, set Call Forwarding No Answer to “OFF”.\n\nAuthor:\n Yashashwini M B(ymuddena@cisco.com)\n Modified by: Purushotham S (pshivara@cisco.com)\n\nReferences:\n US25518\n BW-SIPPhone-InteropTestPlan-R21.0\n\nDescription:\n This test case verifies that the DUT is able to synchronize a Call\n Forwarding No Answer status change from the phone to BroadWorks.\n\nTopology:\n 2 3pcc phones\n\nPass/Fail Criteria:\n 1. DUT sends a SUBSCRIBE with the Call Forwarding No Answer setting\n requested as “false”.\n 2. BroadWorks responds with a 200 OK.\n 3. BroadWorks sends a NOTIFY to the DUT with the Call Forwarding No\n Answer setting confirmed as “false”.\n 4. The DUT responds with a 200 OK.\n\nTest Steps:\n 1. Feature key sync is enabled on Phone A\n 2. From the phone A, set Call Forwarding No Answer to “OFF”.\n Verify:\n 1. 
From the phone, set Call Forwarding No Answer to “OFF”\n 1. BroadWorks Call Forwarding No Answer service for the DUT is “OFF”.\n Browse to BroadWorks user → Incoming Calls → Call Forwarding No\n Answer to confirm.\n 2. Verify the SIP signaling to and from the DUT\n 1. DUT sends a SUBSCRIBE with the Call Forwarding No Answer setting\n requested as “false”.\n 2. BroadWorks responds with a 200 OK.\n 3. BroadWorks sends a NOTIFY to the DUT with the Call Forwarding No\n Answer setting confirmed as “false”.\n 4. The DUT responds with a 200 OK.\n\n Notes:\n\n Known Bugs:\n\"\"\"\n\nimport tng\nimport logging\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig\nfrom tng.frontend.timing import wait\n\nlog = logging.getLogger('FKSDeactivateCallForwardNoAnswerFromPhone')\n\n\nclass FKSDeactivateCallForwardNoAnswerFromPhone(\n SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (PhoneConfigHelper, PhoneLineRegHelper, TsharkHelper)\n helper_num_devices = 2\n\n def setUp(self):\n log.info(\"Start of setUp\")\n\n self.serverproxy = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"as_ip_addr\")\n self.xsi_user_id1 = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"xsi_user_id1\")\n\n log.info('Stop tshark on Linux')\n self.addCleanup(self.tshark.tshark_stop)\n\n def phone_cleanup():\n log.info(\"Start of tearDown\")\n log.info(\"Disable Feature Key sync in Phone webpage\")\n self.oPhone1.ui.set_web_parameter_http(\n FKS_setting=['Ext 1', 'Feature Key Sync', 0])\n # verify FKS_setting is disabled\n verify_fks = self.oPhone1.ui.get_web_parameter_http(\n 'Ext 1', 'Feature Key Sync')\n self.assertEqual(\"0\", verify_fks)\n self.addCleanup(phone_cleanup)\n\n def test_fks_deactivate_cfwd_no_answer_from_phone(self):\n log.info(\"Start of test_fks_deactivate_cfwd_no_answer_from_phone\")\n\n log.info(\"Enable Feature Key sync in Phone webpage\")\n self.oPhone1.ui.set_web_parameter_http(\n FKS_setting=['Ext 1', 'Feature Key Sync', 1])\n # verify FKS_setting is enabled\n verify_fks = self.oPhone1.ui.get_web_parameter_http(\n 'Ext 1', 'Feature Key Sync')\n self.assertEqual(\"1\", verify_fks)\n\n log.info(\"Enable Cfwd_no_answer from Phone1\")\n self.oPhone1.ccapi.sendDFKSUpdate(\n 0, 0, 0, '', 0, '', 1, self.user_id2, 4, 2, '0', 1, 1)\n wait(5, \"Cfwd_no_answer enabled on Phone1\")\n\n log.info(\"Check Cfwd_no_answer enabled on server\")\n self.broadsoft = BroadsoftConfig()\n\n if 'true' in self.broadsoft.get_call_forward_type_status(\n cfw_type='CallForwardingNoAnswer',\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1):\n log.info(\"Cfwd_no_answer enabled on server\")\n else:\n raise AssertionError(\"Cfwd_no_answer is not enabled in server\")\n\n log.info('Start tshark on linux')\n dut_ip = self.oPhone1.get_phone_ip()\n filter_cmd = 'port sip and host {}'.format(dut_ip)\n capture_file = self.tshark.tshark_start(filter_cmd)\n\n log.info(\"Disable Cfwd_no_answer from Phone1\")\n self.oPhone1.ccapi.sendDFKSUpdate(\n 0, 0, 0, '', 0, '', 0, '', 4, 2, '0', 1, 1)\n wait(5, \"Cfwd_no_answer disabled on Phone1\")\n\n log.info(\"Check Cfwd_no_answer disabled on server\")\n self.broadsoft = BroadsoftConfig()\n if 'false' in self.broadsoft.get_call_forward_type_status(\n 
cfw_type='CallForwardingNoAnswer',\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1):\n log.info(\"Cfwd_no_answer disabled on server\")\n else:\n raise AssertionError(\"Cfwd_no_answer is not enabled in server\")\n\n # analyse tshark capture\n log.info(\"Get CSeq and Call-ID of SUBSCRIBE\")\n subscribe_cseq, subscribe_call_id = (\n self.tshark.tshark_get_string_cseq_call_id(\n capture_file, src_ip=dut_ip, dst_ip=self.serverproxy,\n search_string='as-feature-event', method='SUBSCRIBE',\n header='Event'))\n\n log.info(\n \"DUT sends a SUBSCRIBE with the Call Forwarding No Answer\"\n \"setting requested as “false”\")\n self.tshark.tshark_check_string_in_packet_bytes(\n capture_file, src_ip=dut_ip, dst_ip=self.serverproxy,\n cseq=subscribe_cseq, call_id=subscribe_call_id,\n search_string='false',\n protocol='sip', method='SUBSCRIBE')\n\n log.info(\"Check BroadWorks responds with 200 OK for SUBSCRIBE\")\n self.tshark.tshark_check_string_in_message(\n capture_file, '200 OK', '200 OK', src_ip=self.serverproxy,\n dst_ip=dut_ip, cseq=subscribe_cseq, call_id=subscribe_call_id,\n header='Status-Line')\n\n log.info(\n \"Get CSeq and Call-ID of NOTIFY for Call Forwarding No Answer\")\n notify_cseq, notify_call_id = (\n self.tshark.tshark_get_method_cseq_call_id(\n capture_file, src_ip=self.serverproxy, dest_ip=dut_ip,\n method='NOTIFY', protocol='sip'))\n\n log.info(\n \"Check BroadWorks sends a NOTIFY to the DUT with the Call\"\n \"Forwarding No Answer setting confirmed as “false”\")\n self.tshark.tshark_check_string_in_packet_bytes(\n capture_file, src_ip=self.serverproxy, dst_ip=dut_ip,\n cseq=notify_cseq, call_id=notify_call_id,\n search_string='false',\n protocol='sip', method='NOTIFY')\n\n log.info(\"Check the DUT responds with a 200 OK\")\n self.tshark.tshark_check_string_in_message(\n capture_file, '200 OK', '200 OK', src_ip=dut_ip,\n dst_ip=self.serverproxy, cseq=notify_cseq, call_id=notify_call_id,\n header='Status-Line')\n\n log.info(\n \"Successfully verified traces for \"\n \"test_fks_deactivate_cfwd_no_answer_from_phone\")\n log.info(\"End of test_fks_deactivate_cfwd_no_answer_from_phone\")\n\n\n# this is called by 'tng run'\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Interop/section_8/cc_basic/cmCC25518_3pcc_BS_IOT_Interop_128_FKSDeactivateCfwdNoAnswerFromPhone.py","file_name":"cmCC25518_3pcc_BS_IOT_Interop_128_FKSDeactivateCfwdNoAnswerFromPhone.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"190445370","text":"#!encoding:utf-8\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import models\r\nfrom vuser.models import User\r\nfrom vcourse.models import Course, Video, Technology\r\n\r\n\r\n# Create your models here.\r\nclass Score(models.Model):\r\n \"\"\"用户得分记录表\"\"\"\r\n createtime = models.CharField('获得积分的日期', max_length=20)\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户ID')\r\n technology = models.ForeignKey(Technology, on_delete=models.CASCADE, null=True, verbose_name='类别ID', blank=True)\r\n score = models.IntegerField('获得积分')\r\n\r\n def __unicode__(self):\r\n return self.user.nickname\r\n\r\n\r\nclass WatchRecord(models.Model):\r\n \"\"\"用户观看记录表\"\"\"\r\n STATUS = (\r\n (0, '已看完'),\r\n (1, '未看完')\r\n )\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户ID')\r\n video = models.ForeignKey(Video, on_delete=models.CASCADE, null=True, 
verbose_name='视频ID')\r\n course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name='课程ID')\r\n video_process = models.IntegerField('观看时间进度')\r\n video_time = models.IntegerField('视频长度', default=0)\r\n status = models.IntegerField('观看状态', choices=STATUS)\r\n createtime = models.CharField('记录时间', max_length=20)\r\n\r\n def __unicode__(self):\r\n return self.user.nickname\r\n\r\n\r\nclass WatchCourse(models.Model):\r\n \"\"\"记录用户那些课程都已经观看完成\"\"\"\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户ID')\r\n course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name='课程ID')\r\n createtime = models.CharField('记录时间', max_length=20, default='2015-09-08 12:00:00')\r\n\r\n\r\nclass Watchtime(models.Model):\r\n \"\"\"记录用户观看视频时长\"\"\"\r\n createtime = models.CharField('日期', max_length=10)\r\n userid = models.IntegerField('用户ID')\r\n time = models.IntegerField('学习时长')\r\n\r\n\r\nclass WatchTimu(models.Model):\r\n \"\"\"记录用户习题情况\"\"\"\r\n createtime = models.CharField('日期', max_length=10)\r\n userid = models.IntegerField('用户ID')\r\n timuid = models.IntegerField('题目ID')\r\n courseid = models.IntegerField('课程ID')\r\n status = models.CharField(verbose_name='习题状态', max_length=1) #0正确, 1错误\r\n skill = models.CharField(verbose_name='相关技能点', max_length=100, default='')\r\n\r\n\r\nclass Watchface(models.Model):\r\n \"\"\"记录脸部表情\"\"\"\r\n userid = models.FloatField()\r\n joy = models.FloatField()\r\n engagement = models.FloatField()\r\n sadness = models.FloatField()\r\n anger = models.FloatField()\r\n surprise = models.FloatField()\r\n fear = models.FloatField()\r\n valence = models.FloatField()\r\n contempt = models.FloatField()\r\n vtime = models.FloatField()\r\n disgust = models.FloatField()\r\n\r\n","sub_path":"vfast/vrecord/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"485613580","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generates pybind11 bindings code.\"\"\"\n\nfrom typing import Text\n\nfrom clif.protos import ast_pb2\nfrom clif.pybind11 import classes\nfrom clif.pybind11 import enums\nfrom clif.pybind11 import function\nfrom clif.pybind11 import utils\n\nI = utils.I\n\n\nclass ModuleGenerator(object):\n \"\"\"A class that generates pybind11 bindings code from CLIF ast.\"\"\"\n\n def __init__(self, ast: ast_pb2.AST, module_name: Text):\n self._ast = ast\n self._module_name = module_name\n\n def generate_from(self, ast: ast_pb2.AST):\n \"\"\"Generates pybind11 bindings code from CLIF ast.\n\n Args:\n ast: CLIF ast protobuf.\n\n Yields:\n Generated pybind11 bindings code.\n \"\"\"\n for s in self._generate_headlines():\n yield s\n yield f'PYBIND11_MODULE({self._module_name}, m) {{'\n yield I+('m.doc() = \"CLIF generated pybind11-based module for '\n f'{ast.source}\";')\n for decl in ast.decls:\n if decl.decltype == ast_pb2.Decl.Type.FUNC:\n for s in 
function.generate_from(decl.func):\n yield s\n elif decl.decltype == ast_pb2.Decl.Type.CONST:\n for s in self._generate_const_variables(decl.const):\n yield s\n elif decl.decltype == ast_pb2.Decl.Type.CLASS:\n for s in classes.generate_from(decl.class_, 'm'):\n yield s\n elif decl.decltype == ast_pb2.Decl.Type.ENUM:\n for s in enums.generate_from(decl.enum, 'm'):\n yield s\n yield ''\n yield '}'\n\n def _generate_headlines(self):\n \"\"\"Generates #includes and headers.\"\"\"\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n if decl.decltype == ast_pb2.Decl.Type.CONST:\n self._generate_const_variables_headers(decl.const, includes)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''\n\n def _generate_const_variables_headers(self, const_decl: ast_pb2.ConstDecl,\n includes: set):\n if const_decl.type.lang_type == 'complex':\n includes.add('third_party/pybind11/include/pybind11/complex.h')\n if (const_decl.type.lang_type.startswith('list<') or\n const_decl.type.lang_type.startswith('dict<') or\n const_decl.type.lang_type.startswith('set<')):\n includes.add('third_party/pybind11/include/pybind11/stl.h')\n\n def _generate_const_variables(self, const_decl: ast_pb2.ConstDecl):\n \"\"\"Generates variables.\"\"\"\n lang_type = const_decl.type.lang_type\n\n if (lang_type in {'int', 'float', 'double', 'bool', 'str'} or\n lang_type.startswith('tuple<')):\n const_def = I + (f'm.attr(\"{const_decl.name.native}\") = '\n f'{const_decl.name.cpp_name};')\n else:\n const_def = I + (f'm.attr(\"{const_decl.name.native}\") = '\n f'py::cast({const_decl.name.cpp_name});')\n\n yield const_def\n\n\ndef write_to(channel, lines):\n \"\"\"Writes the generated code to files.\"\"\"\n for s in lines:\n channel.write(s)\n channel.write('\\n')\n","sub_path":"clif/pybind11/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"403978237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 12:31:23 2019\n\n@author: Wladek\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom numpy_neural_network.planar_utils import plot_decision_boundary\nfrom numpy_neural_network.NN_model import nn_model\nfrom numpy_neural_network.NN_function import forward_propagation\n\n\ndef predict(parameters, X):\n A2, cache = forward_propagation(X, parameters)\n return A2 > 0.5\n\n\ndef load_extra_datasets():\n N = 200\n gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None,\n cov=0.7, n_samples=N, n_features=2, n_classes=2,\n shuffle=True, random_state=None)\n return gaussian_quantiles\n\n\ngaussian_quantiles = load_extra_datasets()\nX, Y = gaussian_quantiles\nX, Y = X.T, Y.reshape(1, Y.shape[0])\n# Visualize the data\nYdraw = Y[0]\nplt.scatter(X[0, :], X[1, :], c=Ydraw, s=40, cmap=plt.cm.Spectral)\n# %%\nclf = sklearn.linear_model.LogisticRegressionCV()\nclf.fit(X.T, Y.T)\nplot_decision_boundary(lambda x: clf.predict(x), X, Y)\nplt.title(\"Logistic Regression\")\nplt.subplot()\n# Print accuracy\nLR_predictions = clf.predict(X.T)\nprint('Accuracy of logistic regression: %d ' % float(\n (np.dot(Y, LR_predictions) + np.dot(1 - Y, 1 - LR_predictions)) / float(Y.size) * 100) +\n '% ' + \"(percentage of correctly labelled datapoints)\")\n# %%\n# 
Build a model with a n_h-dimensional hidden layer\nparameters = nn_model(X, Y, n_h=3, num_iterations=1000, print_cost=True)\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))\n","sub_path":"numpy_neural_network/NNin_numpy.py","file_name":"NNin_numpy.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"349995671","text":"#!/usr/bin/env python3\n# TODO\n# tensorboard - add some more stats\n# normalization, regularization\n\nimport argparse\nfrom functools import partial\nfrom glob import glob\nimport os\nimport random\nimport numpy as np\nfrom scipy.misc import imresize, imread\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.nets import inception\nimport tensorflow.contrib.slim as slim\n\ndef chunker(length, chunk_size):\n return (np.arange(pos, min(pos + chunk_size, length)) \\\n for pos in range(0, length, chunk_size))\n\ndef filter_paths(path, dirs):\n return [f for d in dirs for f in glob(os.path.join(path, d + '/*.jpg'))]\n\ndef load_images(paths, resize=None):\n if resize is None:\n return np.array([imread(path) / 255 for path in paths])\n return np.array([imresize(imread(path), resize) / 255 for path in paths])\n\ndef extract_dir(path):\n # extract the last directory name\n return os.path.basename(os.path.dirname(path))\n\ndef load_inception(n_classes):\n X = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))\n\n training = tf.placeholder_with_default(False, shape=[])\n with slim.arg_scope(inception.inception_v3_arg_scope()):\n logits, end_points = inception.inception_v3(X, num_classes=1001,\n is_training=training)\n # for i in end_points: print(i)\n # change shape\n inc_saver = tf.train.Saver(save_relative_paths=True)\n prelogits = tf.squeeze(end_points['PreLogits'], axis=[1,2])\n gradient_stop = tf.stop_gradient(prelogits)\n a = tf.layers.dense(gradient_stop, 500, activation=tf.nn.relu)\n logits = tf.layers.dense(a, n_classes)\n\n return X, inc_saver, logits, prelogits\n\ndef define_nn(n_classes):\n X = tf.placeholder(tf.float32, shape=(None, 100, 100, 3))\n\n with tf.name_scope('layers'):\n conv = partial(tf.layers.conv2d, kernel_size=3, padding='same',\n activation=tf.nn.relu)\n\n a = tf.layers.average_pooling2d(X, 4, 4)\n a = conv(a, filters=50)\n a = tf.layers.max_pooling2d(a, 2, 2)\n a = conv(a, filters=100)\n pre = a # last frozen layer - needed for caching\n saver = tf.train.Saver()\n\n a = tf.stop_gradient(a) # this must be here even if pre is cached\n a = tf.layers.max_pooling2d(a, 2, 2)\n a = conv(a, filters=150, kernel_size=1)\n a = conv(a, filters=150)\n\n a = tf.contrib.layers.flatten(a)\n a = tf.layers.dense(a, 500, activation=tf.nn.relu)\n # add more layers, move the saver\n\n logits = tf.layers.dense(a, n_classes)\n return X, saver, logits, pre\n\ndef training(logits):\n y = tf.placeholder(tf.int32, shape=(None))\n\n with tf.name_scope('training'):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,\n logits=logits)\n loss = tf.reduce_mean(xentropy)\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.01,\n momentum=0.9, use_nesterov=True).minimize(loss)\n\n with tf.name_scope('eval'):\n correct = tf.nn.in_top_k(logits, y, 1)\n # correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n \n tf.summary.scalar('loss', loss)\n 
tf.summary.scalar('accuracy', accuracy)\n tf.summary.histogram('cross_entropy', xentropy)\n\n return y, optimizer, accuracy\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--epochs', type=int, default=10)\n parser.add_argument('-b', '--batch-size', type=int, default=256)\n parser.add_argument('-a', '--sample', type=int,\n help='validation set sample size')\n parser.add_argument('-s', '--save', action='store_true')\n parser.add_argument('-r', '--restore', action='store_true')\n parser.add_argument('--early', type=int, help='early stopping rounds')\n parser.add_argument('--full', action='store_true', help='use all classes')\n parser.add_argument('-o','--output', type=str, default='model.ckpt')\n parser.add_argument('-i','--input', type=str, default='model.ckpt')\n parser.add_argument('--inception', action='store_true')\n parser.add_argument('--cache', action='store_true')\n args = parser.parse_args()\n\n train_path = 'fruits-360/Training'\n valid_path = 'fruits-360/Test'\n\n if args.full:\n labels = {d:i for i,d in enumerate(os.listdir(train_path))}\n paths = np.array([f for f in glob(os.path.join(train_path, '*/*.jpg'))])\n if args.sample:\n n = args.sample\n valid_paths = random.sample(filter_paths(valid_path, dirs), n)\n else:\n valid_paths = [f for f in glob(os.path.join(valid_path, '*/*.jpg'))]\n else:\n # fruit = ['Banana', 'Avocado', 'Cocos']\n # dirs = [d for d in os.listdir(train_path) if d in fruit]\n dirs = [d for d in os.listdir(train_path) if d.startswith('Apple')][:6]\n # map labels\n labels = {cdir:i for i,cdir in enumerate(dirs)}\n paths = np.array(filter_paths(train_path, dirs))\n valid_paths = filter_paths(valid_path, dirs)\n \n n_classes = len(labels)\n resize = None\n np.random.shuffle(paths)\n\n if args.inception:\n X, saver, logits, pre = load_inception(n_classes)\n resize = (299, 299)\n else:\n X, saver, logits, pre = define_nn(n_classes)\n\n y, opt, acc = training(logits)\n merged = tf.summary.merge_all()\n init = tf.global_variables_initializer()\n\n y_valid = np.array([labels[extract_dir(i)] for i in valid_paths])\n y_train = np.array([labels[extract_dir(i)] for i in paths])\n X_valid = load_images(valid_paths, resize)\n n_train = paths.shape[0]\n print('train size:', n_train, 'valid size:', X_valid.shape[0])\n\n with tf.Session() as sess:\n init.run()\n train_writer = tf.summary.FileWriter('./viz/train', sess.graph)\n valid_writer = tf.summary.FileWriter('./viz/valid', sess.graph)\n\n if args.inception:\n # load inception model weights\n saver.restore(sess, './inception_v3.ckpt')\n elif args.restore:\n saver.restore(sess, tf.train.latest_checkpoint('./tmp/'))\n\n if args.cache:\n shape = pre.get_shape().as_list()\n shape[0] = n_train\n X_train = np.zeros(shape)\n\n # cache training & validation data\n for chunk in chunker(n_train, 500):\n X_batch = load_images(paths[chunk], resize=resize)\n X_train[chunk] = sess.run([pre], feed_dict={X: X_batch})\n\n X_valid = sess.run(pre, feed_dict={X: X_valid})\n # change placeholder to the frozen layer\n X = pre\n\n cnt = 0\n for e in range(args.epochs):\n for chunk in chunker(n_train, args.batch_size):\n y_batch = y_train[chunk]\n\n if args.cache: \n X_batch = X_train[chunk]\n else:\n X_batch = load_images(paths[chunk], resize=resize)\n\n feed_dict = {X: X_batch, y: y_batch}\n summary, _ = sess.run([merged, opt], feed_dict=feed_dict)\n\n cnt += 1\n train_writer.add_summary(summary, cnt)\n\n feed_dict = {X: X_valid, y: y_valid}\n summary, score = sess.run([merged, acc], feed_dict=feed_dict)\n\n 
valid_writer.add_summary(summary, cnt)\n \n print('Epoch: {:<4} Accuracy: {:>8.4f}'.format(e, score))\n\n if args.save:\n saver.save(sess, './tmp/model.ckpt')\n\nif __name__ == '__main__':\n main()","sub_path":"fruit/cnn_model.py","file_name":"cnn_model.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"452717181","text":"from RULEngine.Util.Pose import Pose\nfrom RULEngine.Util.Position import Position\nfrom typing import Union\n\n\nclass SpeedPose(Pose):\n\n def __init__(self, *args):\n position, orientation = self._pose_builder(args)\n self._orientation = orientation\n self._position = position\n\n @property\n def orientation(self):\n return self._orientation\n\n @orientation.setter\n def orientation(self, angular_speed):\n self._orientation = float(angular_speed)\n\n def __add__(self, other: Union['SpeedPose', Position]):\n if isinstance(other, SpeedPose):\n res = SpeedPose(self.position + other.position, self.orientation + other.orientation)\n elif isinstance(other, Position):\n res = SpeedPose(self.position + other, self.orientation)\n else:\n raise TypeError\n return res\n\n def __sub__(self, other: Union['SpeedPose', Position]):\n if isinstance(other, SpeedPose):\n res = SpeedPose(self.position - other.position, self.orientation - other.orientation)\n elif isinstance(other, Position):\n res = SpeedPose(self.position - other, self.orientation)\n else:\n raise TypeError\n return res\n\n def __eq__(self, other: Union['SpeedPose', Position]):\n if isinstance(other, Position):\n return self.position == other\n elif isinstance(other, SpeedPose):\n orientation_equal = self.orientation == other.orientation\n position_equal = self.position == other.position\n return position_equal and orientation_equal\n else:\n raise TypeError\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def scale(self, value):\n return SpeedPose(self.position * value, self.orientation)\n\n","sub_path":"RULEngine/Util/SpeedPose.py","file_name":"SpeedPose.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"592906641","text":"from z3 import *\nfrom box import Z3Box\nfrom bin import Z3Bin\nfrom grid import Grid\n\ndef place(nodes):\n boxes = []\n min_area = 0\n\n for node in nodes:\n box = Z3Box.from_dict(node)\n min_area += box.area()\n boxes.append(box)\n\n pcb = Z3Bin()\n\n s = Solver()\n\n s.add(pcb.range_constraint())\n\n for i, box in enumerate(boxes):\n # Add rotation constraints\n s.add(box.rotation_constraint())\n # Constrain position to be on the pcb\n s.add(box.range_constraint(pcb))\n for j in range(i):\n # Add non overlapping constraint\n s.add(box.overlap_constraint(boxes[j]))\n\n # Project constraints:\n #s.add(pcb.var_width == 170)\n #s.add(pcb.var_height == 50)\n s.add(pcb.area_constraint(min_area * 1.5))\n\n #s.add(boxes[0].fix_position_constraint(*pcb.var_center()))\n\n if s.check() == sat:\n model = s.model()\n\n pcb.eval(model)\n print(str(pcb))\n\n grid = Grid(*pcb.dim())\n\n result = []\n for box in boxes:\n box.eval(model)\n print(str(box))\n result.append(box.to_dict())\n grid.add_box(box)\n\n print(str(grid))\n return result\n else:\n print('unsat')\n return []\n\n\nif __name__ == '__main__':\n import argparse\n import json\n\n parser = argparse.ArgumentParser(description='SMT-based, constrained placement')\n\n parser.add_argument('filename', type=str)\n\n args, unknown = 
parser.parse_known_args()\n\n    fileout = args.filename.split('.')\n    fileout[-1] = 'out.pcpl'\n    fileout = '.'.join(fileout)\n\n    input = \"\"\n    with open(args.filename) as f:\n        input = json.loads(f.read())\n\n    result = place(input)\n\n    with open(fileout, 'w') as f:\n        print(json.dumps(result), file=f)\n","sub_path":"placer/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"301090234","text":"# -*- encoding: utf-8 -*-\nfrom django.contrib.auth.decorators import permission_required\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom gestiones.Producto.altaplato.forms import altaPlatoForm\nfrom gestiones.Producto.producto.models import Plato\n\n\n@permission_required('Administrador.is_admin', login_url=\"login\")\ndef altaplato(request):\n    if request.method == 'POST':\n\n        formulario = altaPlatoForm(request.POST)\n\n        if formulario.is_valid():\n            nombre = formulario.cleaned_data['nombre']\n            precio = formulario.cleaned_data['precio']\n            stock = formulario.cleaned_data['stock']\n            descripcion = formulario.cleaned_data['descripcion']\n            promocion = formulario.cleaned_data['enPromocion']\n            descuento = formulario.cleaned_data['descuento']\n            seccion = formulario.cleaned_data['seccion']\n\n            plato = Plato.objects.create(nombre=nombre, precio=precio, stock=stock, descripcion=descripcion,\n                                         enPromocion=promocion, descuento=descuento, seccion=seccion)\n            seccion.platoss.add(plato)\n            seccion.save()\n            return render_to_response('Producto/altaplato/altaplatoexito.html', {},\n                                      context_instance=RequestContext(request))\n\n        else:\n            return render_to_response('Producto/altaplato/altaplato.html', {'formulario': formulario},\n                                      context_instance=RequestContext(request))\n    else:\n        formulario = altaPlatoForm()\n        return render_to_response('Producto/altaplato/altaplato.html', {'formulario': formulario},\n                                  context_instance=RequestContext(request))","sub_path":"gestiones/Producto/altaplato/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"72287403","text":"# -*- coding: utf-8 -*-\n# @Time    : 2018/8/3 13:57\n# @Author  : xmskf\n# @Email   : 84887867@qq.com\n# @File    : CustomClass_Test.py\n# @Software: PyCharm\n\n# Using __str__\n\n\nclass Student(object):\n    def __init__(self, name):\n        self.name = name\n\n\nprint(Student('xmskf'))  # <__main__.Student object at 0x000001525E5F99B0>\n\n\n# Just define a __str__() method that returns a nicely formatted string\nclass Student(object):\n    def __init__(self, name):\n        self.name = name\n\n    def __str__(self):\n        return 'Student object (name: %s)' % self.name\n\n\nprint(Student('xmskf'))\n\ns = Student('xnskf3')\nprint(s)\n\n\n# __iter__\n# If a class is to be used in a for ... in loop, like a list or tuple, it must\n# implement an __iter__() method that returns an iterator object; Python's for\n# loop then keeps calling that object's __next__() method to fetch the next\n# value, until a StopIteration error ends the loop.\n\nclass Fib(object):\n    def __init__(self):\n        self.a, self.b = 0, 1  # initialize the two counters a, b\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        self.a, self.b = self.b, self.a + self.b\n        if self.a > 10000:\n            raise StopIteration()\n        return self.a\n\n\nfor n in Fib():\n    print(n)\n\n\n
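# --- added aside (not in the original file): the same iteration protocol can\n# be had from a generator, which builds the iterator object for us ---\ndef fib_gen(limit=10000):\n    a, b = 0, 1\n    while True:\n        a, b = b, a + b\n        if a > limit:\n            return  # returning inside a generator raises StopIteration\n        yield a\n\n\n# for n in fib_gen():  # prints the same sequence as the loop above\n#     print(n)\n\n\n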
# __getitem__\n# A Fib instance works in a for loop and looks a bit like a list, but it still\n# cannot be used as one - for example, fetching the 5th element fails:\n# >>> Fib()[5]\n# Traceback (most recent call last):\n\nclass Fib(object):\n    def __getitem__(self, n):\n        a, b = 1, 1\n        for x in range(n):\n            a, b = b, a + b\n        return a\n\n\nf = Fib()\nprint(f.__getitem__(10))\nprint(f[100])\n\n# lists have a magical slicing syntax:\ns = list(range(100))[1:50:2]\nprint(s)\n\n\nclass Fib(object):\n    def __getitem__(self, n):  # n is an index\n        if isinstance(n, int):\n            a, b = 1, 1\n            for x in range(n):\n                a, b = b, a + b\n            return a\n        if isinstance(n, slice):  # n is a slice\n            start = n.start  # n.start is the head of the slice\n            stop = n.stop  # n.stop is the tail of the slice\n            if start is None:\n                start = 0\n            a, b = 1, 1\n            L = []\n            for x in range(stop):\n                if x >= start:\n                    L.append(a)\n                a, b = b, a + b\n            return L\n\n\nf = Fib()\nprint(f[0:5])\n\n\n# Now try slicing Fib:\n#\n# >>> f = Fib()\n# >>> f[0:5]\n# [1, 1, 2, 3, 5]\n# >>> f[:10]\n# [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n# But the step parameter is not handled:\n#\n# >>> f[:10:2]\n# [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n# Negative indices are not handled either, so implementing __getitem__()\n# correctly still takes quite a bit of work.\n#\n# Also, if the object is treated like a dict, the argument of __getitem__() may\n# be any object usable as a key, e.g. str.\n#\n# The counterpart __setitem__() method treats the object as a list or dict for\n# assignment, and __delitem__() deletes an element.\n#\n# In short, with these methods our own classes can behave just like Python's\n# built-in list, tuple and dict. This is all thanks to the dynamic language's\n# \"duck typing\": no particular interface has to be inherited.\n\n
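# --- added sketch (not in the original): handling the step parameter in the\n# slice branch; int indexing is omitted for brevity and negative indices are\n# still left as an exercise ---\nclass FibWithStep(object):\n    def __getitem__(self, n):\n        if isinstance(n, slice):\n            start = n.start or 0\n            step = n.step or 1\n            a, b = 1, 1\n            L = []\n            for x in range(n.stop):\n                if x >= start and (x - start) % step == 0:\n                    L.append(a)\n                a, b = b, a + b\n            return L\n\n\nprint(FibWithStep()[:10:2])  # [1, 2, 5, 13, 34]\n\n\n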
# __getattr__\n# Normally, accessing a method or attribute that does not exist on a class\n# raises an error. Take this Student class:\nclass Student(object):\n    def __init__(self):\n        self.name = 'xmskf'\n\n\n# Accessing the name attribute is fine, but accessing the non-existent score\n# attribute fails:\n# >>> s = Student()\n# >>> print(s.name)\n# xmskf\n# >>> print(s.score)\n# Traceback (most recent call last):\n# AttributeError: 'Student' object has no attribute 'score'\n\n\nclass Student4(object):\n    def __init__(self):\n        self.name = 'xmskf'\n\n    def __getattr__(self, item):\n        if item == 'score':\n            return 99\n        if item == 'age':\n            return lambda: 25\n\n\n# When a missing attribute such as score is accessed, the Python interpreter\n# tries __getattr__(self, 'score') instead, which gives us a chance to return\n# a value for it:\ns = Student4()\nprint(s.score)\nprint(s.age())\n\n\n# Note that __getattr__ is only called when an attribute is NOT found; existing\n# attributes such as name are never looked up through __getattr__.\n# Also, any access like s.abc returns None here, because our __getattr__ falls\n# through and returns None by default.\n# To make the class respond to only a few specific attributes, we follow the\n# convention and raise AttributeError:\n\nclass Student(object):\n    def __getattr__(self, item):\n        if item == 'age':\n            return lambda: 25\n        raise AttributeError(\"'Student' object has no attribute '%s'\" % item)\n\n\ns = Student()\n\n\n# print(s.aaa)  # AttributeError: 'Student' object has no attribute 'aaa'\n\n# Many sites expose REST APIs these days (Sina Weibo, Douban and the like),\n# with URLs such as:\n#\n# http://api.server/user/friends\n# http://api.server/user/timeline/list\n# Writing an SDK method for every URL would be exhausting, and the SDK would\n# have to change whenever the API does.\n#\n# With a fully dynamic __getattr__ we can write a chained call instead:\n\nclass Chain(object):\n\n    def __init__(self, path=''):\n        self._path = path\n\n    def __getattr__(self, path):\n        return Chain('%s/%s' % (self._path, path))\n\n    def __str__(self):\n        return self._path\n\n    __repr__ = __str__\n\n\ns = Chain().status.user.timeline.list\nprint(s)\n\n\n# __call__\n# An instance has its own attributes and methods, and we normally invoke a\n# method with instance.method(). Can the instance itself be called directly?\n# In Python, yes.\n\n# Any class that defines a __call__() method can have its instances called\n# directly. For example:\nclass Student(object):\n    def __init__(self, name):\n        self.name = name\n\n    def __call__(self):\n        print('My name is %s' % self.name)\n\n\ns = Student('xmskf')\ns()\n\n# __call__() can also take parameters. Calling an instance is just like calling\n# a function, so objects can be seen as functions and functions as objects -\n# there is no fundamental difference between the two.\n\n# How do we tell whether a variable is an object or a function?\n# More often we really need to know whether an object can be called at all.\n# Anything that can be called is a Callable object, e.g. functions and class\n# instances with __call__() like the one above:\n\nprint(callable(Student('aa')))  # True\nprint(callable(max))  # True\nprint(callable([1, 2, 3]))  # False\nprint(callable(None))  # False\nprint(callable('str'))  # False\n","sub_path":"src/CustomClass_Test.py","file_name":"CustomClass_Test.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"148566555","text":"# coding: utf-8\n##################################################\n##################################################\n# Copyright (C) 2018 Marcus Wieder\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <https://www.gnu.org/licenses/>.\n##################################################\n##################################################\n\n# one python package has to be installed for mutating: parmed\n\nimport parmed as pm\nimport sys\nimport os\nfrom IPython.display import SVG, display\nfrom parmed import unit as u\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef set_up_psf(psf):\n    psf.number_of_dummys = 0\n    for atom in psf.atoms:\n        atom.dummy_type = atom.type\n        atom.dummy_counter = -1\n\ndef write_psf(psf, output_file_base, tlc, vacuum = False):\n    if vacuum == True:\n        for atom in psf.atoms:\n            if atom.dummy_type.startswith('DD'):\n                atom.save_type = atom.type\n                atom.type = atom.dummy_type\n        psf_vacuum = psf[':'+str(tlc)]\n        psf_vacuum.write_psf(output_file_base + \"/system_in_vac.psf\")\n        # revert the changes\n        for atom in psf.atoms:\n            if atom.dummy_type.startswith('DD'):\n                atom.type = atom.save_type\n        psf_vacuum[':'+str(tlc)].write_pdb(output_file_base + '/system_in_vac.pdb')\n\n        # add newline\n        h = open(output_file_base + \"/system_in_vac.psf\", \"a\")\n        h.write('\\n')\n        h.close()\n\n\n    else:\n        for atom in psf.atoms:\n            if atom.dummy_type.startswith('DD'):\n                atom.save_type = atom.type\n                atom.type = atom.dummy_type\n        psf.write_psf(output_file_base + \"/system_in_solv.psf\")\n        # add newline\n        h = open(output_file_base + \"/system_in_solv.psf\", \"a\")\n        h.write('\\n')\n        h.close()\n        # revert the changes\n        for atom in psf.atoms:\n            if atom.dummy_type.startswith('DD'):\n                atom.type = atom.save_type\n        psf.write_pdb(output_file_base + '/system_in_solv.pdb')\n\n\n\n    # unfortunately, the psf writer is faulty - therefore two white spaces\n    # have to be manually inserted; open:\n    # ~/anaconda3/pkgs/parmed-2.7.3-py27_1/lib/python2.7/site-packages/parmed/formats/psf.py\n    # and exchange\n    #151     #dest.write(atmstr + ' '.join(atom.props) + '\\n')\n    #with\n    #149         # mw modified\n    #150         dest.write(atmstr + str(atom.props[0]) + ' ' + str(atom.props[1]) + ' ' + str(atom.props[2]) + '\\n')\n\n\n\ndef _generate_dummy_system(output_file_base, psf, parameter, debug=False):\n\n    if not 
os.path.exists(output_file_base):\n print('Created directory: - ' + str(output_file_base))\n os.makedirs(output_file_base)\n\n else:\n print('Writing in - ' + str(output_file_base))\n\n\n header_prm = '''* Parameters generated by analogy by\n* CHARMM General Force Field (CGenFF) program version 1.0.0\n*\n\n! Automatically obtained dummy parameters \n! using https://github.com/wiederm/SAI-relative-solvation-free-energy\n\n\n'''\n\n header_rtf = '''* Dummy atom parameters \n* test\n*\n36 1\n\n\n'''\n\n prm = open(output_file_base + '/dummy_parameters.prm', 'w')\n rtf = open(output_file_base +'/atom_dummy_definition.rtf', 'w')\n prm.write(header_prm)\n rtf.write(header_rtf)\n prm.write('\\nATOMS\\n')\n\n # for debugginb porpose:\n atom_set = set()\n bond_set = set()\n angle_set = set()\n dihedral_set = set()\n improper_set = set()\n nb_set = set()\n\n for atom in psf.atoms:\n if atom.dummy_type in atom_set:\n continue\n else:\n atom_set.add(atom.dummy_type)\n \n if atom.dummy_type.startswith('DD'):\n print('- Setting dummy parameters ...')\n print(' + Atom-Name: ', atom.name)\n print(' + Atom-Type: ', atom.type)\n print(' + Atom Dummy Type: ', atom.dummy_type)\n\n prm.write('{:7} {:6} {:6} {:6}\\n'.format('MASS', atom.dummy_counter, atom.dummy_type, atom.mass))\n rtf.write('{:7} {:6} {:6} {:6}\\n'.format('MASS', atom.dummy_counter, atom.dummy_type, atom.mass))\n \n elif debug == True:\n\n #print('- Setting parameters ...')\n #print(' + Atom-Name: ', atom.name)\n #print(' + Atom-Type: ', atom.type)\n\n prm.write('{:7} {:6} {:6} {:6}\\n'.format('MASS', parameter.atom_types[atom.type].number, atom.type, atom.mass))\n rtf.write('{:7} {:6} {:6} {:6}\\n'.format('MASS', parameter.atom_types[atom.type].number, atom.type, atom.mass))\n\n rtf.close() \n prm.write('\\n\\n')\n prm.write('BONDS\\n')\n\n for bond in psf.bonds:\n atom1, atom2 = bond.atom1, bond.atom2\n if (atom1.dummy_type, atom2.dummy_type) in bond_set:\n continue\n else:\n bond_set.add((atom1.dummy_type, atom2.dummy_type))\n\n if atom1.dummy_type.startswith('DD') or atom2.dummy_type.startswith('DD'):\n print(' >> Setting dummy bond parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type))\n b= parameter.bond_types[atom1.type, atom2.type] \n prm.write('{:7} {:7} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), b.k ,b.req))\n elif debug == True:\n #print(' >> Setting bond parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type))\n b= parameter.bond_types[atom1.type, atom2.type] \n prm.write('{:7} {:7} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), b.k ,b.req))\n\n\n\n # get all unique angles and parameters\n prm.write('\\n\\n')\n prm.write('ANGLES\\n')\n for angle in psf.angles:\n atom1, atom2, atom3 = angle.atom1, angle.atom2, angle.atom3\n if (atom1.dummy_type, atom2.dummy_type, atom3.dummy_type) in angle_set:\n continue\n elif (atom3.dummy_type, atom2.dummy_type, atom1.dummy_type) in angle_set:\n continue\n else:\n angle_set.add((atom1.dummy_type, atom2.dummy_type, atom3.dummy_type))\n\n if atom1.dummy_type.startswith('DD') or atom2.dummy_type.startswith('DD') or atom3.dummy_type.startswith('DD'):\n print(' >> Setting dummy angle parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type))\n b= parameter.angle_types[atom1.type, atom2.type, atom3.type]\n prm.write('{:7} {:7} {:7} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), b.k ,b.theteq))\n elif debug == 
True:\n\n #print(' >> Setting angle parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type))\n b= parameter.angle_types[atom1.type, atom2.type, atom3.type]\n prm.write('{:7} {:7} {:7} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), b.k ,b.theteq))\n\n\n # get all unique dihedrals and parameters\n prm.write('\\n\\n')\n prm.write('DIHEDRALS\\n')\n for dihedral in psf.dihedrals:\n atom1, atom2, atom3, atom4 = dihedral.atom1, dihedral.atom2, dihedral.atom3, dihedral.atom4\n if (atom1.dummy_type, atom2.dummy_type, atom3.dummy_type, atom4.dummy_type) in dihedral_set:\n continue\n else:\n dihedral_set.add((atom1.dummy_type, atom2.dummy_type, atom3.dummy_type, atom4.dummy_type))\n\n if atom1.dummy_type.startswith('DD') or atom2.dummy_type.startswith('DD') or atom3.dummy_type.startswith('DD') or atom4.dummy_type.startswith('DD'):\n print(' >> Setting dummy dihedral parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type) + ' - ' + str(atom4.dummy_type))\n b= parameter.dihedral_types[atom1.type, atom2.type, atom3.type, atom4.type]\n prm.write('{:7} {:7} {:7} {:7} {:6.2f} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), str(atom4.dummy_type), b[0].phi_k ,b[0].per, b[0].phase))\n\n elif debug == True:\n #print(' >> Setting dihedral parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type) + ' - ' + str(atom4.dummy_type))\n b= parameter.dihedral_types[atom1.type, atom2.type, atom3.type, atom4.type]\n prm.write('{:7} {:7} {:7} {:7} {:6.2f} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), str(atom4.dummy_type), b[0].phi_k ,b[0].per, b[0].phase))\n\n # get all unique improper and parameters\n prm.write('\\n\\n')\n prm.write('IMPROPERS\\n')\n for impr in psf.impropers:\n\n atom1, atom2, atom3, atom4 = impr.atom1, impr.atom2, impr.atom3, impr.atom4\n if atom1.dummy_type.startswith('DD') or atom2.dummy_type.startswith('DD') or atom3.dummy_type.startswith('DD') or atom4.dummy_type.startswith('DD'):\n print('>> Setting dummy improper parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type) + ' - ' + str(atom4.dummy_type))\n # carefull with this solution - > central atom has to be set in the beginning\n b= parameter.improper_types[atom2.type, atom1.type, atom3.type, atom4.type]\n prm.write('{:7} {:7} {:7} {:7} {:6.2f} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), str(atom4.dummy_type), b.psi_k ,0, b.psi_eq))\n\n elif debug == True:\n #print('>> Setting improper parameters for: ' + str(atom1.dummy_type) + ' - ' + str(atom2.dummy_type) + ' - ' + str(atom3.dummy_type) + ' - ' + str(atom4.dummy_type))\n # carefull with this solution - > central atom has to be set in the beginning\n b= parameter.improper_types[atom2.type, atom1.type, atom3.type, atom4.type]\n prm.write('{:7} {:7} {:7} {:7} {:6.2f} {:6.2f} {:6.2f} \\n'.format(str(atom1.dummy_type), str(atom2.dummy_type), str(atom3.dummy_type), str(atom4.dummy_type), b.psi_k ,0, b.psi_eq))\n\n prm.write('\\n\\n')\n prm.write('''NONBONDED nbxmod 5 atom cdiel fshift vatom vdistance vfswitch -\n cutnb 14.0 ctofnb 12.0 ctonnb 10.0 eps 1.0 e14fac 1.0 wmin 1.5\n ''')\n prm.write('\\n\\n')\n\n for atom in psf.atoms:\n if atom.dummy_type in nb_set:\n continue\n else:\n 
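# dummy atoms are written with zeroed nonbonded terms below\n            # (eps = rmin = 0), so they are effectively ghost particles\n            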
nb_set.add(atom.dummy_type)\n\n if atom.dummy_type.startswith('DD'):\n prm.write('{:7} {:6} {:6} {:6}\\n'.format(atom.dummy_type, 0.0, 0.000, 0.000))\n elif debug == True:\n prm.write('{:7} {:6} {:6} {:6}\\n'.format(atom.dummy_type, 0.0, parameter.atom_types[atom.type].epsilon, parameter.atom_types[atom.type].rmin))\n\n prm.write('\\n')\n prm.write('END')\n\n prm.close()\n\n\ndef _draw_graph_from_mol(psf, tlc, node_list = []):\n\n G = nx.Graph()\n\n node_color = []\n normal_node_color = 'g'\n dummy_node_color = 'r'\n\n for atom in psf.atoms:\n if tlc.upper() not in str(atom.residue):\n continue\n\n atom_name = atom.name\n atom_index = atom.idx\n atom_type = atom.type\n\n if atom.dummy_type.startswith('DD'):\n node_name = str(atom_index) + '-' + str(atom_name) + '-' + str(atom.dummy_type)\n node_color.append(dummy_node_color)\n else:\n node_name = str(atom_index) + '-' + str(atom_name) + '-' + str(atom_type)\n node_color.append(normal_node_color)\n\n G.add_node(node_name, atom_name=atom_name, atom_index=atom_index, atom_type=atom_type)\n\n for bond in psf.bonds:\n a1 = bond.atom1\n a2 = bond.atom2\n if tlc.upper() not in str(a1.residue) or tlc.upper() not in str(a2.residue):\n continue\n\n atom_name_a1 = a1.name\n atom_index_a1 = a1.idx\n atom_type_a1 = a1.type\n atom_dummy_type_a1 = a1.dummy_type\n \n if a1.dummy_type.startswith('DD'):\n node_name_a1 = str(atom_index_a1) + '-' + str(atom_name_a1) + '-' + str(atom_dummy_type_a1)\n else:\n node_name_a1 = str(atom_index_a1) + '-' + str(atom_name_a1) + '-' + str(atom_type_a1)\n \n atom_name_a2 = a2.name\n atom_index_a2 = a2.idx\n atom_type_a2 = a2.type\n atom_dummy_type_a2 = a2.dummy_type\n\n if a2.dummy_type.startswith('DD'):\n node_name_a2 = str(atom_index_a2) + '-' + str(atom_name_a2) + '-' + str(atom_dummy_type_a2)\n else:\n node_name_a2 = str(atom_index_a2) + '-' + str(atom_name_a2) + '-' + str(atom_type_a2)\n \n G.add_edge(node_name_a1 , node_name_a2)\n\n return G, node_color\n\n\ndef generate_mutation_strategy(psf_mol1, psf_mol2, tlc):\n G1, node_color = _draw_graph_from_mol(psf_mol1, tlc)\n nx.draw(G1, with_labels=True, font_weight='bold', node_color='g', node_size=1400, alpha=0.5, font_size=12)\n plt.show()\n \n G2, node_color = _draw_graph_from_mol(psf_mol2, tlc)\n nx.draw(G2, with_labels=True, font_weight='bold', node_color='g', node_size=1400, alpha=0.5, font_size=12)\n plt.show()\n \n return G1, G2\n\ndef _set_atom_to_dummy(psf, index, dummy_atom_type, charge_acceptor):\n \n psf.number_of_dummys = int(psf.number_of_dummys) +1\n atom = psf.atoms[index]\n atom.dummy_type = dummy_atom_type\n atom.dummy_counter = 500 + int(psf.number_of_dummys)\n print('- Turning off atom and changing to dummy atom ...')\n print(' + Atom-Name: ', atom.name)\n print(' + Atom-Type: ', atom.type)\n print(' + Dummy Atom-Type: ', atom.dummy_type)\n \n # compensate charge\n print('- Compensating charge ...')\n atom_donor = psf.atoms[index]\n print(' + Dummy atom has charge: ' + str(round(atom_donor.charge, 5)))\n\n atom_acceptor = psf.atoms[charge_acceptor]\n acceptor_atom_name = atom_acceptor.name\n print(' + Acceptor atom: ', str(acceptor_atom_name))\n\n print(' + Acceptor atom has charge before transfer: ' + str(round(atom_acceptor.charge, 5)))\n\n atom_acceptor.charge = round(atom_acceptor.charge + atom_donor.charge, 5)\n print(' + Acceptor atom has charge after transfer: ' + str(round(atom_acceptor.charge, 5)))\n\n atom_donor.charge = 0.0\n\n\n \n\ndef initialize_system(ligand_three_letter_code, charmm_gui_base, input_file_name, toppar_base):\n 
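# builds the psf/crd/sdf/rtf paths from the CHARMM-GUI output directory,\n    # loads the CHARMM parameter set, attaches the coordinates to the topology\n    # and returns (parameter, psf, sdf_file_path, rtf_file_path)\n    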
############################################################################\n ############################################################################\n # generate filepath for psf, rtf, sdf and crd \n psf_system = charmm_gui_base + '/' + str(input_file_name) + '.psf'\n coord = pm.charmm.CharmmCrdFile(charmm_gui_base + '/' + str(input_file_name) +'.crd')\n\n sdf_file_path = str(charmm_gui_base) + '/' + str(ligand_three_letter_code) + '/' + str(ligand_three_letter_code).upper() + '.sdf' \n rtf_file_path = str(charmm_gui_base) + '/' + str(ligand_three_letter_code) + '/' + str(ligand_three_letter_code) + '.rtf' \n\n # parse parameters and crd \n parameter_files = charmm_gui_base + '/' + str(ligand_three_letter_code) + '/' + str(ligand_three_letter_code) + '.rtf', charmm_gui_base + '/' + str(ligand_three_letter_code) + '/' + str(ligand_three_letter_code) + '.prm', toppar_base + '/top_all36_cgenff.rtf' , toppar_base + '/par_all36_cgenff.prm' , toppar_base + '/top_all36_prot.rtf' , toppar_base + '/par_all36_prot.prm' , toppar_base + '/par_all36_na.prm', toppar_base + '/toppar_water_ions.str'\n # set up parameter objec\n parameter = pm.charmm.CharmmParameterSet(*parameter_files)\n # generate topology and fill coordinates\n psf = pm.load_file(psf_system)\n psf.coordinates = coord.coordinates\n set_up_psf(psf)\n ############################################################################\n ############################################################################\n return parameter, psf, sdf_file_path, rtf_file_path \n\n\n\ndef _find_connected_heavy_atom_index_of_hydrogen(hydrogen_atom, psf_mol):\n\n for bond in psf_mol.bonds:\n a1 = bond.atom1\n a2 = bond.atom2\n\n if hydrogen_atom.idx == a1.idx:\n return a2.idx\n elif hydrogen_atom.idx == a2.idx:\n return a1.idx\n\n\ndef _find_donor_heavy_atom_index(heavy_atom, psf_mol):\n \"\"\"\n Return list of heavy atoms that are connected to \n heavy_atom in the topology of the psf_mol.\n \"\"\"\n \n dict_of_adjacent_heavy_atoms = dict()\n for bond in psf_mol.bonds:\n a1 = bond.atom1\n a2 = bond.atom2\n\n if heavy_atom.idx == a1.idx and (int(a2.mass) != 1):\n dict_of_adjacent_heavy_atoms[a2.name] = (a2)\n elif heavy_atom.idx == a2.idx and (int(a2.mass) != 1):\n dict_of_adjacent_heavy_atoms[a1.name] = (a1)\n\n list_of_adjacent_heavy_atoms_idx = list()\n\n for a in dict_of_adjacent_heavy_atoms.values():\n list_of_adjacent_heavy_atoms_idx.append(a.idx)\n\n return list_of_adjacent_heavy_atoms_idx\n\ndef _prioritize_donor_indeces(list_of_adjacent_heavy_atoms_idx, atom, index_strategy):\n \"\"\"\n Decide which heavy atom should be the donor of a charge change.\n \"\"\"\n\n current_atom_idx = atom.idx\n current_atom_position_in_index_strategy = index_strategy.index(current_atom_idx)\n\n for potential_atoms_idx in list_of_adjacent_heavy_atoms_idx:\n if potential_atoms_idx in index_strategy[:current_atom_position_in_index_strategy]:\n print('Already dummy: ', potential_atoms_idx)\n else:\n print('Candidate: ', potential_atoms_idx)\n return potential_atoms_idx\n\n\ndef _decide_donor_atom(atom, psf_mol, index_strategy):\n if(int(atom.mass) == 1):\n donor_index = _find_connected_heavy_atom_index_of_hydrogen(atom, psf_mol)\n else:\n list_of_adjacent_heavy_atoms_idx = _find_donor_heavy_atom_index(atom, psf_mol)\n if len(list_of_adjacent_heavy_atoms_idx) != 1:\n print(list_of_adjacent_heavy_atoms_idx)\n donor_index = _prioritize_donor_indeces(list_of_adjacent_heavy_atoms_idx, atom, index_strategy) \n else:\n donor_index = int(list_of_adjacent_heavy_atoms_idx[0])\n 
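# donor choice: a hydrogen donates its charge to its single heavy\n    # neighbour; a heavy atom donates to the first adjacent heavy atom that\n    # has not itself been turned into a dummy yet\n    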
return donor_index\n\n\ndef _mutate_atoms(lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug = False):\n if atom_to_be_turned_off_index != -1:\n\n atom = psf_mol[atom_to_be_turned_off_index]\n print('Atom to be turned off: ', atom)\n donor_index = _decide_donor_atom(atom, psf_mol, index_strategy)\n _set_atom_to_dummy(psf_mol, atom_to_be_turned_off_index, 'DDD'+str(lambda_counter), donor_index)\n else:\n print('Generating starting conformation ... ') \n\n intermediate_state_file_path = output_directory + 'is' + str(lambda_counter) +'/'\n _generate_dummy_system(intermediate_state_file_path , psf_mol, parameters, debug)\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper())\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper(), vacuum=True)\n G, node_color = _draw_graph_from_mol(psf_mol, tlc.upper())\n nx.draw(G, with_labels=True, font_weight='bold', node_color=node_color, node_size=1400, alpha=0.5, font_size=12)\n plt.show()\n\ndef _write_bond_breaker(constraints, psf_mol, resname, parameters, intermediate_state_file_path):\n \"\"\"Generates the CHARMM select statments for the two atoms which bond \n is broken, writes the stream file to break the bond and generates the \n MMFP restraints to keep the broken off part in proximity to the commen \n core. REMEMBER: force constant in MMFP = force constant/2.\"\"\"\n\n\n geo_sphere_bond_statments = []\n geo_sphere_angle_statments = []\n geo_sphere_torsion_statments = [] \n\n nr_of_constraints = 0\n for key in constraints:\n if 'bond' in key:\n bond_idx = constraints[key]\n atom1, atom2 = psf_mol[bond_idx[0]], psf_mol[bond_idx[1]]\n\n atom1_select_statment = 'define atom1 sele resname @{residue_name} .and. atom * * %s show end' % (atom1.name)\n atom2_select_statment = 'define atom2 sele resname @{residue_name} .and. atom * * %s show end' % (atom2.name)\n geo_sphere_bond_statments.append(atom1_select_statment)\n geo_sphere_bond_statments.append(atom2_select_statment)\n print('Breaking bond between:')\n print('Atom1 Select Statment: ', atom1_select_statment)\n print('Atom2 Select Statment: ', atom2_select_statment)\n\n break_bond = 'dele conn sele atom1 end sele atom2 end'\n geo_sphere_bond_statments.append(break_bond)\n print('Bonded constraint between atoms: ', atom1, atom2)\n\n b= parameters.bond_types[atom1.type, atom2.type]\n\n # distance constraint\n bond_statment = \"\"\"\nGEO sphere dist -\nharmonic symmetric -\nforce %f droff %f - \nselect atom1 end select atom2 end\"\"\" % (b.k/2, b.req)\n\n if nr_of_constraints == 0:\n geo_sphere_bond_statments.append('MMFP')\n\n geo_sphere_bond_statments.append(bond_statment)\n nr_of_constraints += 1\n\n if 'angle' in key:\n angle_idx = constraints[key]\n atom1, atom2, atom3 = psf_mol[angle_idx[0]], psf_mol[angle_idx[1]], psf_mol[angle_idx[2]]\n \n print('Angle constraint between atoms: ', atom1, atom2, atom3)\n\n \n b = parameters.angle_types[atom1.type, atom2.type, atom3.type]\n\n angle_statment = \"\"\"\nGEO sphere angl -\nharmonic symmetric -\nforce %f tref %f - \nselect resname @{residue_name} .and. atom * * %s end -\nselect resname @{residue_name} .and. atom * * %s end -\nselect resname @{residue_name} .and. 
atom * * %s end\"\"\" % (b.k/2, b.theteq, atom1.name, atom2.name, atom3.name)\n\n geo_sphere_angle_statments.append(angle_statment)\n nr_of_constraints += 1\n\n if 'torsion' in key:\n torsion_idx = constraints[key]\n atom1, atom2, atom3, atom4 = psf_mol[torsion_idx[0]], psf_mol[torsion_idx[1]], psf_mol[torsion_idx[2]], psf_mol[torsion_idx[3]]\n \n print('Torsion constraint between atoms: ', atom1, atom2, atom3, atom3)\n \n b = parameters.dihedral_types[atom1.type, atom2.type, atom3.type, atom4.type]\n \n torsion_statment = \"\"\"\nGEO sphere dihe -\nharmonic symmetric -\nforce %f tref %f - \nselect resname @{residue_name} .and. atom * * %s end -\nselect resname @{residue_name} .and. atom * * %s end -\nselect resname @{residue_name} .and. atom * * %s end - \nselect resname @{residue_name} .and. atom * * %s end\"\"\" % (b[0].phi_k * b[0].per ** 2, b[0].phase, atom1.name, atom2.name, atom3.name, atom4.name)\n\n\n #TODO: I am not sure the the above is correct for force and tref! sb und mk fragen!\n geo_sphere_torsion_statments.append(torsion_statment)\n nr_of_constraints += 1\n\n\n with open(intermediate_state_file_path + '/bond_breaker.str', 'w') as f:\n f.write('! This stream file breaks a bond between two atoms\\n')\n f.write('! and subsequently constrains the two broken parts \\n\\n')\n\n for s in geo_sphere_bond_statments:\n f.write(s + '\\n\\n\\n')\n\n for s in geo_sphere_angle_statments:\n f.write(s + '\\n\\n\\n')\n\n for s in geo_sphere_torsion_statments:\n f.write(s + '\\n\\n\\n')\n\n f.write('END\\n')\n \n\ndef test():\n pass\n\ndef _write_atom_deleter(index_strategy, psf_mol, tlc, parameters, intermediate_state_file_path):\n \"\"\"Generates the CHARMM select statments for the dummy atoms and deletes all of them.\"\"\"\n\n print('Deletes all dummy atoms and generates real commen cores')\n with open(intermediate_state_file_path + '/atom_deleter.str', 'w') as f:\n f.write('! This stream file deletes all dummy atoms\\n')\n f.write('! to connect the commen cores \\n\\n')\n\n \n for atom_idx in index_strategy:\n print(atom_idx)\n atom = psf_mol[atom_idx]\n\n atom_select_statment = 'define dummy sele resname @{residue_name} .and. 
atom * * %s show end' % (atom.name)\n delete_statment = 'delete atom sele dummy end'\n f.write(atom_select_statment + '\\n')\n f.write(delete_statment + '\\n\\n\\n')\n\n f.write('END\\n')\n\n\ndef _connect_commen_core(constraints, lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug = False):\n intermediate_state_file_path = str(output_directory) + 'cc' +'/'\n _generate_dummy_system(intermediate_state_file_path , psf_mol, parameters, debug)\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper())\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper(), vacuum=True)\n _write_atom_deleter(index_strategy, psf_mol, tlc.upper(), parameters, intermediate_state_file_path)\n\n\ndef _generate_commen_core(constraints, lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug = False):\n\n for con in constraints:\n if 'bond' in con:\n print('Bond will be broken between: ', constraints[con])\n\n intermediate_state_file_path = output_directory + 'is' + str(lambda_counter) +'/'\n _generate_dummy_system(intermediate_state_file_path , psf_mol, parameters, debug)\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper())\n write_psf(psf_mol, intermediate_state_file_path, tlc.upper(), vacuum=True)\n _write_bond_breaker(constraints, psf_mol, tlc.upper(), parameters, intermediate_state_file_path)\n \n\n G, node_color = _draw_graph_from_mol(psf_mol, tlc.upper())\n nx.draw(G, with_labels=True, font_weight='bold', node_color=node_color, node_size=1400, alpha=0.5, font_size=12)\n plt.show()\n\ndef employ_mutation_strategy(index_strategy, constraints, output_directory, psf_mol, parameters, tlc, debug = False):\n\n # mutate atoms\n lambda_counter = 0\n for atom_to_be_turned_off_index in [-1] + index_strategy:\n\n print()\n print('##################################')\n print('Lambda-State: ' + str(lambda_counter))\n print('##################################')\n _mutate_atoms(lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug)\n intermediate_state_file_path = output_directory + 'is' + str(lambda_counter) +'/'\n\n with open(intermediate_state_file_path + '/bond_breaker.str', 'w') as f:\n f.write('! empty bond breaker\\n\\n')\n lambda_counter += 1\n\n # break bond and set constraints\n print()\n print('##################################')\n print('Lambda-State: ' + str(lambda_counter))\n print('##################################')\n _generate_commen_core(constraints, lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug)\n\n lambda_counter += 1\n\n # delete all dummy atoms\n print()\n print('##################################')\n print('Lambda-State: ' + str(lambda_counter))\n print('##################################')\n _connect_commen_core(lambda_counter, lambda_counter, atom_to_be_turned_off_index, index_strategy, output_directory, psf_mol, parameters, tlc, debug)\n intermediate_state_file_path = output_directory + 'cc' +'/'\n\n with open(intermediate_state_file_path + '/bond_breaker.str', 'w') as f:\n f.write('! 
empty bond breaker\\n\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n############################################################################################\n############################################################################################\n############################################################################################\n# def run_dynamics(sim, dcd_output_filepath):\n# # Set up the reporters to report energies and coordinates every 100 steps\n# sim.reporters.append(\n# StateDataReporter(sys.stdout, 500, step=True, potentialEnergy=True,\n# kineticEnergy=True, temperature=True,\n# volume=True, density=True)\n# )\n# sim.reporters.append(app.DCDReporter(dcd_output_filepath + '/system.dcd', 500))\n\n# # Run dynamics\n# print('Running dynamics')\n# sim.step(1000000)\n\n\n# def create_vacc_omm_system(intermediate_state_file_path, charmm_gui_base):\n# print( 'Intermediate state: ' + str(intermediate_state_file_path))\n \n# dummy_rtf = intermediate_state_file_path + '/atom_dummy_definition.rtf'\n# dummy_prm = intermediate_state_file_path + '/dummy_parameters.prm'\n# psf_file_path = intermediate_state_file_path + '/system_vac.psf'\n# crd_file_path = intermediate_state_file_path + '/system_vac.pdb'\n \n# parameter_files = charmm_gui_base + '/' + ligand_three_letter_code + '/' + ligand_three_letter_code + '.rtf', charmm_gui_base + '/' + ligand_three_letter_code + '/' + ligand_three_letter_code + '.prm', charmm_gui_base + '/toppar/top_all36_cgenff.rtf' , charmm_gui_base + '/toppar/par_all36_cgenff.prm' , charmm_gui_base + '/toppar/top_all36_prot.rtf' , charmm_gui_base + '/toppar/par_all36m_prot.prm' , charmm_gui_base + '/toppar/top_all36_prot.rtf' , charmm_gui_base + '/toppar/par_all36_na.prm', charmm_gui_base + '/toppar/toppar_water_ions.str', dummy_rtf, dummy_prm\n \n# parameter = pm.charmm.CharmmParameterSet(*parameter_files)\n# # this time I use the openMM CharmmPSFFile generator\n# psf = app.charmmpsffile.CharmmPsfFile(psf_file_path)\n# crd = app.PDBFile(crd_file_path)\n\n# coords = crd.positions\n \n# print('Creating OpenMM System')\n# system = psf.createSystem(parameter, nonbondedMethod=app.NoCutoff)\n \n \n \n# # Create the integrator to do Langevin dynamics\n# integrator = mm.LangevinIntegrator(\n# 300*u.kelvin, # Temperature of heat bath\n# 1.0/u.picoseconds, # Friction coefficient\n# 2.0*u.femtoseconds, # Time step\n# )\n\n# # Define the platform to use; CUDA, OpenCL, CPU, or Reference. 
Or do not specify\n# # the platform to use the default (fastest) platform\n# platform = mm.Platform.getPlatformByName('CPU')\n# #prop = dict(CudaPrecision='mixed') # Use mixed single/double precision\n\n# # Create the Simulation object\n# #sim = app.Simulation(psf.topology, system, integrator, platform, prop)\n# sim = app.Simulation(psf.topology, system, integrator, platform)\n \n# # Set the particle positions\n# sim.context.setPositions(crd.positions)\n\n# # Minimize the energy\n# print('Minimizing energy')\n# sim.minimizeEnergy(maxIterations=1000)\n# print( 'Finished minimizing energy')\n# return sim\n\n\n \n# def create_solvated_omm_system(intermediate_state_file_path, charmm_gui_base):\n \n# print( 'Intermediate state: ' + str(intermediate_state_file_path))\n \n# dummy_rtf = intermediate_state_file_path + '/atom_dummy_definition.rtf'\n# dummy_prm = intermediate_state_file_path + '/dummy_parameters.prm'\n# psf_file_path = intermediate_state_file_path + '/system_sol.psf'\n# crd_file_path = intermediate_state_file_path + '/system_sol.pdb'\n \n# parameter_files = charmm_gui_base + '/' + ligand_three_letter_code + '/' + ligand_three_letter_code + '.rtf', charmm_gui_base + '/' + ligand_three_letter_code + '/' + ligand_three_letter_code + '.prm', charmm_gui_base + '/toppar/top_all36_cgenff.rtf' , charmm_gui_base + '/toppar/par_all36_cgenff.prm' , charmm_gui_base + '/toppar/top_all36_prot.rtf' , charmm_gui_base + '/toppar/par_all36m_prot.prm' , charmm_gui_base + '/toppar/top_all36_prot.rtf' , charmm_gui_base + '/toppar/par_all36_na.prm', charmm_gui_base + '/toppar/toppar_water_ions.str', dummy_rtf, dummy_prm\n \n# parameter = pm.charmm.CharmmParameterSet(*parameter_files)\n# # this time I use the openMM CharmmPSFFile generator\n# psf = app.charmmpsffile.CharmmPsfFile(psf_file_path)\n# crd = app.PDBFile(crd_file_path)\n\n# coords = crd.positions\n# min_crds = [coords[0][0], coords[0][1], coords[0][2]]\n# max_crds = [coords[0][0], coords[0][1], coords[0][2]]\n\n# for coord in coords:\n# min_crds[0] = min(min_crds[0], coord[0])\n# min_crds[1] = min(min_crds[1], coord[1])\n# min_crds[2] = min(min_crds[2], coord[2])\n# max_crds[0] = max(max_crds[0], coord[0])\n# max_crds[1] = max(max_crds[1], coord[1])\n# max_crds[2] = max(max_crds[2], coord[2])\n \n \n \n# print( 'Length x-axis: ' + str(max_crds[0]-min_crds[0]))\n# print( 'Length y-axis: ' + str(max_crds[1]-min_crds[1]))\n# print( 'Length z-axis: ' + str(max_crds[2]-min_crds[2]))\n \n# psf.setBox(max_crds[0]-min_crds[0],\n# max_crds[1]-min_crds[1],\n# max_crds[2]-min_crds[2],\n# )\n \n# print('Creating OpenMM System')\n# system = psf.createSystem(parameter, nonbondedMethod=app.PME,\n# constraints=app.HBonds, nonbondedCutoff=12.0*u.angstroms,\n# switchDistance=10.0*u.angstroms)\n \n \n \n# # Create the integrator to do Langevin dynamics\n# integrator = mm.LangevinIntegrator(\n# 300*u.kelvin, # Temperature of heat bath\n# 1.0/u.picoseconds, # Friction coefficient\n# 2.0*u.femtoseconds, # Time step\n# )\n\n# # Define the platform to use; CUDA, OpenCL, CPU, or Reference. 
Or do not specify\n# # the platform to use the default (fastest) platform\n# platform = mm.Platform.getPlatformByName('CPU')\n# prop = dict(CudaPrecision='mixed') # Use mixed single/double precision\n\n# # Create the Simulation object\n# sim = app.Simulation(psf.topology, system, integrator, platform, prop)\n \n# # Set the particle positions\n# sim.context.setPositions(crd.positions)\n\n# # Minimize the energy\n# print('Minimizing energy')\n# sim.minimizeEnergy(maxIterations=1000)\n# print('Finished minimizing energy')\n# return sim\n\n\n\n\n\n\n\n\n","sub_path":"bin/SAI.py","file_name":"SAI.py","file_ext":"py","file_size_in_byte":34689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"237744795","text":"import json, datetime\n\nfrom django.http import Http404, HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q#using for orm queries\n#django libraries\n\nfrom rest_framework import status, permissions\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n#rest api generics(for classes)\nfrom rest_framework.decorators import api_view\n#request method control for functions\n\nfrom articles.models import Article\nfrom articles.serializers import ArticleSerializer\n#api models & serializers\n\nclass ArticleList(APIView) :\n permission_classes = (permissions.IsAuthenticated,)#only for authenticated users\n def get_object_older_than_24(self) :\n try :\n time_threshold = datetime.datetime.now() - datetime.timedelta(hours=24)\n return Article.objects.filter(created__lt=time_threshold)#find articles that is older than 24 hours\n except Article.DoesNotExist :\n return False\n def get(self, request, format=None) :\n articles = Article.objects.filter(owner=request.user).values()\n #get articles that only requested user owns\n return Response(articles, status=status.HTTP_200_OK)\n #A function that returns all of the articles that the user created\n def post(self, request, format=None) :\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid() :\n time_string = datetime.datetime.now().strftime('%Y-%m-%d')\n if(request.data['created'].find(time_string) == -1 ):\n return Response(status=status.HTTP_400_BAD_REQUEST)\n #if the servertime and the article's time is not the same\n serializer.save(owner=request.user)\n jsonString = {}\n jsonString['id'] = int(serializer.data['id'])#add id to the response\n return HttpResponse(json.dumps(jsonString),\n content_type=\"application/json\",\n status=status.HTTP_201_CREATED) #post uploaded\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST) #if requested informations aren't accurate\n def delete(self, request, format=None) :\n articles = self.get_object_older_than_24()\n if(Article != False) :\n articles.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n #A function that removes articles that is older than 24 hours.\n #A function that adds a new article\n#About the all articles\n\nclass ArticleDetail(APIView) :\n def get_object_by_id(self, pk) :\n try :\n return Article.objects.get(id=pk)\n except Article.DoesNotExist :\n raise Http404\n #setted to raise 404 when article doesn't exist because it's much efficient to write once\n def patch(self, request, pk, format=None) :\n article = self.get_object_by_id(pk)\n serializer = ArticleSerializer(article, data=request.data)\n if str(getattr(article, 'owner')) == request.user.username :#check the user and the owner of article is the same\n 
if serializer.is_valid() :\n                serializer.save()\n                return Response(serializer.data)\n        else :\n            return Response(\"This is not your article.\", status=status.HTTP_400_BAD_REQUEST)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n# About each article","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"235211162","text":"#!/usr/bin/python\n\nnops = \"A\" * 386 + \"B\" * 4 + \"C\" * 30\nbuff = nops\n\n# [nops][egghunter][short jmp (nseh)][seh (pop pop ret)][nops][w00tw00t][shellcode]\n\nf = open(\"users.txt\",'w')\nf.write(buff)\nf.close()","sub_path":"BoF/Egg Hunter BoF/minishare-15575/bs.py","file_name":"bs.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"404791629","text":"from flask import Flask, abort\n\nfrom werkzeug.exceptions import HTTPException, default_exceptions, Aborter, _aborter\nfrom werkzeug.http import HTTP_STATUS_CODES\n\nclass Payme(HTTPException):\n    code = 400\n    description = 'Hello?'\n\ndefault_exceptions[400] = Payme  # error code\n\n_aborter = Aborter()\n\nHTTP_STATUS_CODES[400] = 'hello'\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    abort(400)\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"Trash/flask/4m09d/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"504883757","text":"#!/usr/bin/python3\nimport argparse\nfrom time import time\n\nimport numpy as np\nfrom h5py import File\nfrom keras import backend as K\nfrom keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D\nfrom keras.models import Sequential\nfrom scipy.misc import imread, imresize, imsave\nfrom scipy.optimize import fmin_l_bfgs_b as f_min\n\n\nclass Evaluator(object):\n    \"\"\"\n    The Evaluator class computes the loss and the gradients in a single pass,\n    which is more efficient than evaluating them separately.\n    \"\"\"\n\n    def __init__(self, dim, outputs):\n        self.loss_value = None\n        self.gradient_values = None\n        self.dim = dim\n        self.outputs = outputs\n\n    def eval(self, x):\n        x = x.reshape((1, 3, self.dim, self.dim))\n        outs = self.outputs([x])\n        loss_value = outs[0]\n        if len(outs[1:]) == 1:\n            gradient_values = outs[1].flatten().astype('float64')\n        else:\n            gradient_values = np.array(outs[1:]).flatten().astype('float64')\n\n        return loss_value, gradient_values\n\n    def loss(self, x):\n        assert self.loss_value is None\n\n        loss_value, gradient_values = self.eval(x)\n        self.loss_value = loss_value\n        self.gradient_values = gradient_values\n\n        return self.loss_value\n\n    def gradients(self, x):\n        assert self.loss_value is not None\n\n        gradient_values = np.copy(self.gradient_values)\n        self.loss_value = None\n        self.gradient_values = None\n\n        return gradient_values\n\n\n
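# --- illustrative aside (added, not part of the original script): the caching\n# above exists because fmin_l_bfgs_b queries the loss and the gradient through\n# two separate callbacks; computing both in one forward pass and replaying the\n# cached gradient halves the work. The same pattern on a toy quadratic:\ndef _demo_cached_lbfgs():\n    cache = {}\n\n    def loss(x):\n        cache['grad'] = 2.0 * x        # gradient from the same pass\n        return float((x ** 2).sum())   # scalar loss\n\n    def gradients(x):\n        return cache.pop('grad')       # replay instead of recomputing\n\n    x_opt, min_val, _ = f_min(loss, np.ones(3), fprime=gradients)\n    return x_opt, min_val\n\n\n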
class StyleTransformer:\n    def __init__(self,\n                 base_path,\n                 style_path,\n                 prefix,\n                 weights_path='vgg16_weights.h5',\n                 total_variation_weight=1.0,\n                 style_weight=0.2,\n                 content_weight=0.8,\n                 dim=512,\n                 iterations=10):\n\n        self.base_path = base_path\n        self.style_path = style_path\n        self.prefix = prefix\n        self.weights_path = weights_path\n\n        self.total_variation_weight = total_variation_weight\n        self.style_weight = style_weight\n        self.content_weight = content_weight\n\n        # generated images are [dim x dim] pixels\n        self.dim = dim\n\n        self.iterations = iterations\n\n    def img_to_vec(self, image_path):\n        img = imresize(imread(image_path), (self.dim, self.dim))\n        img = img.transpose((2, 0, 1)).astype('float64')\n        img = np.expand_dims(img, axis=0)\n\n        return img\n\n    @staticmethod\n    def vec_to_img(x):\n        x = x.transpose((1, 2, 0))\n        x = np.clip(x, 0, 255).astype('uint8')\n\n        return x\n\n    def get_model(self, input_vec):\n        first_layer = ZeroPadding2D((1, 1))\n        first_layer.set_input(input_vec, shape=(3, 3, self.dim, self.dim))\n\n        model = Sequential()\n        model.add(first_layer)\n        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(64, 3, 3, activation='relu'))\n        model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(128, 3, 3, activation='relu'))\n        model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(256, 3, 3, activation='relu'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(256, 3, 3, activation='relu'))\n        model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu'))\n        model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu'))\n        model.add(ZeroPadding2D((1, 1)))\n        model.add(Convolution2D(512, 3, 3, activation='relu'))\n        model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n        # load the VGG16 weights\n        file = File(self.weights_path)\n        for i in range(file.attrs['nb_layers']):\n            if i >= len(model.layers):\n                break\n\n            layer = file['layer_{}'.format(i)]\n            weights = [layer['param_{}'.format(j)] for j in range(layer.attrs['nb_params'])]\n            model.layers[i].set_weights(weights)\n\n        file.close()\n\n        print('Model loaded\\n')\n\n        return model\n\n
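    # --- illustrative aside (added, not in the original): in plain numpy terms\n    # the Gram matrix computed by gram() below is just F @ F.T for a feature\n    # matrix F of shape (channels, height * width) ---\n    @staticmethod\n    def _gram_np(x):\n        f = x.reshape(x.shape[0], -1)  # flatten each channel to one row\n        return f @ f.T                 # (channels, channels) correlations\n\n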
    # Gram matrix of a vectorized image\n    @staticmethod\n    def gram(x):\n        assert K.ndim(x) == 3\n\n        features = K.batch_flatten(x)\n        gram = K.dot(features, K.transpose(features))\n\n        return gram\n\n    def style_loss(self, style, combination):\n        assert K.ndim(style) == 3\n        assert K.ndim(combination) == 3\n\n        style_gram = self.gram(style)\n        combination_gram = self.gram(combination)\n        channels = 3\n        size = self.dim * self.dim\n\n        return K.sum(K.square(style_gram - combination_gram)) / (4. * (channels ** 2) * (size ** 2))\n\n    @staticmethod\n    def content_loss(base, combination):\n        return K.sum(K.square(combination - base))\n\n    def total_variation_loss(self, x):\n        assert K.ndim(x) == 4\n\n        a = K.square(x[:, :, :self.dim - 1, :self.dim - 1] - x[:, :, 1:, :self.dim - 1])\n        b = K.square(x[:, :, :self.dim - 1, :self.dim - 1] - x[:, :, :self.dim - 1, 1:])\n\n        return K.sum(K.pow(a + b, 1.25))\n\n    def transform(self):\n        # get vector representations of the images\n        base = K.variable(self.img_to_vec(self.base_path))\n        style = K.variable(self.img_to_vec(self.style_path))\n\n        result = K.placeholder((1, 3, self.dim, self.dim))\n\n        # combine the three tensors into one\n        input_vec = K.concatenate([base, style, result], axis=0)\n\n        model = self.get_model(input_vec)\n\n        outputs = dict([(layer.name, layer.output) for layer in model.layers])\n\n        # assemble the loss functions into a single scalar\n        loss = K.variable(0.0)\n        layer_features = outputs['conv4_2']\n        base_features = layer_features[0, :, :, :]\n        result_features = layer_features[2, :, :, :]\n        loss += self.content_weight * self.content_loss(base_features, result_features)\n\n        feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']\n        for layer in feature_layers:\n            layer_features = outputs[layer]\n            style_features = layer_features[1, :, :, :]\n            result_features = layer_features[2, :, :, :]\n            style_loss = self.style_loss(style_features, result_features)\n            loss += (self.style_weight / len(feature_layers)) * style_loss\n\n        loss += self.total_variation_weight * self.total_variation_loss(result)\n\n        # compute the gradients of the generated image\n        gradients = K.gradients(loss, result)\n\n        outputs = [loss]\n        if type(gradients) in {list, tuple}:\n            outputs += gradients\n        else:\n            outputs.append(gradients)\n\n        func_outputs = K.function([result], outputs)\n\n        evaluator = Evaluator(self.dim, func_outputs)\n\n        # minimize the loss over the generated image with SciPy's L-BFGS optimizer\n        x = np.random.uniform(0, 255, (1, 3, self.dim, self.dim))\n        for iteration in range(self.iterations):\n            print('Iteration #{}'.format(iteration))\n\n            start = time()\n\n            x, min_val, info = f_min(evaluator.loss, x.flatten(), fprime=evaluator.gradients, maxfun=20)\n\n            print('\\tCurrent loss value:', min_val)\n\n            # save the current image\n            img = self.vec_to_img(x.reshape((3, self.dim, self.dim)))\n            filename = self.prefix + '_{}.png'.format(iteration)\n            imsave(filename, img)\n\n            end = time()\n\n            print('\\t{} saved'.format(filename))\n            print('\\tIteration {0} completed in {1}s\\n'.format(iteration, end - start))\n\n\n
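# --- usage note (added): with the argparse setup below the script runs as\n#   python transfer.py <base.jpg> <style.jpg> <prefix> <iterations> <dim> <alpha> <beta>\n# e.g. (hypothetical file names):\n#   python transfer.py photo.jpg starry_night.jpg result 10 512 0.8 0.2\n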
if __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('base_path', metavar='base', type=str, help='Path to the image to transform.')\n    parser.add_argument('style_path', metavar='style', type=str, help='Path to the style reference image.')\n    parser.add_argument('prefix', metavar='prefix', type=str, help='Prefix for the saved results.')\n    parser.add_argument('iterations', metavar='epoch', type=int, help='Count of iterations to compute.')\n    parser.add_argument('dim', metavar='res', type=int, help='Size of the result image in pixels along one side.')\n    parser.add_argument('alpha', metavar='content', type=float, help='Content weight')\n    parser.add_argument('beta', metavar='style', type=float, help='Style weight')\n    args = parser.parse_args()\n\n    transformer = StyleTransformer(base_path=args.base_path,\n                                   style_path=args.style_path,\n                                   prefix=args.prefix,\n                                   iterations=args.iterations,\n                                   dim=args.dim,\n                                   content_weight=args.alpha,\n                                   style_weight=args.beta)\n    transformer.transform()\n","sub_path":"transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":10147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"639971704","text":"from typing import List\n\n\n# Every element appears twice except for one, which appears only once\nclass Solution0:\n    def singleNumber(self, nums: List[int]) -> int:\n        res = nums[0]\n        for i in range(1, len(nums)):\n            res = res ^ nums[i]\n        return res\n\n# Every element appears three times except for one, which appears only once\n# Method 1\nclass Solution1:\n    def singleNumber(self, nums: List[int]) -> int:\n        res = 0\n        for i in range(0, 32):\n            mask = 1 << i\n            count = 0\n            # count how many numbers have this bit set\n            for j in range(len(nums)):\n                if nums[j] & mask:\n                    count += 1\n            if count % 3:\n                res = res | mask\n        return self.convert(res)\n    # Python ints do not overflow, so a negative 32-bit result has to be\n    # recovered by hand\n    def convert(self,x):\n        if x >= 2**31:\n            x -= 2**32\n        return x\n\n# Method 2\nclass Solution2:\n    def singleNumber(self, nums: List[int]) -> int:\n        a, b = 0, 0\n        for num in nums:\n            a = (a ^ num) & ~b\n            b = (b ^ num) & ~a\n        return a\n\n
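# How Method 2 works (explanatory note, added): after each num, bit i of `a` is\n# set iff that bit has been seen (1 mod 3) times and bit i of `b` is set iff it\n# has been seen (2 mod 3) times; a bit seen three times is cleared from both,\n# so the lone element's bits end up in `a`.\n# Trace for nums = [2, 2, 3, 2]:\n#   num=2: a=2, b=0   num=2: a=0, b=2   num=3: a=1, b=0   num=2: a=3, b=0 -> 3\n\n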
env_matrix: np.ndarray) -> list[float]:\n \"\"\"Returns optimal parameters with respect to an environment matrix.\"\"\"\n if hasattr(self, 'utry'):\n return []\n self.check_env_matrix(env_matrix)\n return self.gate.optimize(env_matrix) # type: ignore\n","sub_path":"bqskit/ir/gates/composed/tagged.py","file_name":"tagged.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"110482230","text":"import random\nimport subprocess\nimport requests\nimport json\nfrom datetime import datetime\nurl_flume = 'http://localhost:81'\nheaders = {'content-type': 'application/json'}\n\ngeolist=['7zzzzz','dr5red','dr5ree','dr5ref','dr5reg','dr5reu','dr5rev','dr5rey','dr5rez','dr5rgb','dr5rgc','dr5rkj','dr5rkm','dr5rkn','dr5rkz','dr5rmh','dr5rnm','dr5rsg','dr5rsh','dr5rsj','dr5rsk','dr5rsm','dr5rsn','dr5rsp','dr5rsq','dr5rsr','dr5rss','dr5rsw','dr5rsx','dr5rt8','dr5ru0','dr5ru1','dr5ru2','dr5ru3','dr5ru4','dr5ru5','dr5ru6','dr5ru7','dr5ru8','dr5ru9','dr5rud','dr5rue','dr5rug','dr5ruh','dr5ruj','dr5rum','dr5ruq','dr5rus','dr5rut','dr5ruu','dr5ruv','dr5ruw','dr5rux','dr5ruy','dr5ruz','dr5rv6','dr5rvd','dr5rvh','dr5rvj','dr5rvn','dr5rvp','dr5rvs','dr5ryy','dr5rzj','dr5x0z','dr5x1p','dr5x2c','dr72h8','dr72h9','dr72hb','dr72hc','dr72hd','dr72hf','dr72hg','dr72j0','dr72j2','dr72j3','dr72j5','dr72j6','dr72je','dr72jh','dr72m2']\n\nmsglist=[20,30,40,50,60]\n\nwhile True:\n for i in random.sample(geolist, 50):\n for j in random.sample(msglist, 1):\n while j>0:\n payload = [{\"headers\":{\"topic\":\"test\"},\"body\":'{\"'+str(datetime.now().strftime(\"%Y-%M-%d,%X\"))+','+str(i)+'\"}'}]\n msg=str(datetime.now().strftime(\"%Y-%M-%d,%X\"))+\",\"+str(i)\n with requests.Session() as session:\n session.post(url_flume, data=json.dumps(payload),headers=headers)\n j-=1\n\n","sub_path":"flumeHttpProducer.py","file_name":"flumeHttpProducer.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"540877714","text":"from torch import nn\nfrom torch.nn.modules.utils import _pair\n\nfrom .. 
import functional as F\n\n__all__ = [\"Aggregation\"]\n\n\nclass Aggregation(nn.Module):\n \"\"\"\n\n\"\"\"\n\n def __init__(self, kernel_size, stride, padding, dilation, pad_mode):\n \"\"\"\n\n:param kernel_size:\n:type kernel_size:\n:param stride:\n:type stride:\n:param padding:\n:type padding:\n:param dilation:\n:type dilation:\n:param pad_mode:\n:type pad_mode:\n\"\"\"\n super().__init__()\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n self.padding = _pair(padding)\n self.dilation = _pair(dilation)\n self.pad_mode = pad_mode\n\n def forward(self, input, weight):\n \"\"\"\n\n:param input:\n:type input:\n:param weight:\n:type weight:\n:return:\n:rtype:\n\"\"\"\n return F.aggregation(\n input,\n weight,\n self.kernel_size,\n self.stride,\n self.padding,\n self.dilation,\n self.pad_mode,\n )\n","sub_path":"neodroidvision/classification/architectures/self_attention_network/self_attention_modules/modules/aggregation.py","file_name":"aggregation.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"83976198","text":"# Author:tanzhiling\nimport pickle\ndef sleep():\n print(\"我睡好了\")\ninfo = {\n \"name\":\"tzl\",\n \"age\":22,\n \"func\":sleep()\n}\n# 序列化\n# f = open(\"test.txt\",\"wb\")\n# f.write(pickle.dumps(info))\n# pickle.dump(info,f)\n# 反序列化\nf = open(\"test.txt\",\"rb\")\n# data = pickle.loads(f.read())\ndata = pickle.load(f)\nf.close()\nprint(data)","sub_path":"demo04/pickle数据序列化.py","file_name":"pickle数据序列化.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"343098370","text":"# %load q04_plot_runs_by_balls/build.py\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_runs_by_balls():\n bat=ipl_df.groupby(['batsman','match_code']).agg({'runs':sum,'delivery':'count'})\n plt.scatter(bat.delivery,bat.runs)\n plt.xlabel('balls faced')\n plt.ylabel('runs scored')\n plt.show()\nplot_runs_by_balls()\n\n","sub_path":"q04_plot_runs_by_balls/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"292625210","text":"# Tornado Libraries\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\n\nfrom PIL import Image\nimport io\nimport numpy\nimport os\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nimport utils\nfrom net import Net, Vgg16\n\ndataset_df = pd.read_csv('dataset.csv')\nstatic_file_path = 'static'\n\nmodel = './21styles.model'\nstyle_images_path = 'images/museum_styles/'\n\nstyle_model = Net(ngf=128)\nstyle_model.load_state_dict(torch.load(model), False)\ncuda = torch.cuda.is_available()\n\nif cuda:\n style_model.cuda()\n content_image = content_image.cuda()\n style = style.cuda()\n\n\ndef evaluate(raw_content_image, raw_content_size, style_image, style_size, cuda, output_name):\n content_image = utils.tensor_load_rgbimage(\n raw_content_image, size=raw_content_size, keep_asp=True)\n content_image = content_image.unsqueeze(0)\n style = 
utils.tensor_load_rgbimage(style_image, size=style_size)\n style = style.unsqueeze(0)\n style = utils.preprocess_batch(style)\n\n style_v = Variable(style)\n\n content_image = Variable(utils.preprocess_batch(content_image))\n style_model.setTarget(style_v)\n\n output = style_model(content_image)\n transfer_image = utils.tensor_save_bgrimage(\n output.data[0], output_name, cuda)\n return transfer_image\n\n\nclass UploadHandler(tornado.web.RequestHandler):\n\n def post(self):\n file = self.request.files['file'][0]\n style_id = self.get_argument('style_id')\n content = file['body']\n image = (io.BytesIO(content))\n print(image)\n image_path = style_images_path + style_id + '.jpg'\n image_path_JPG = style_images_path + style_id + '.JPG'\n if os.path.exists(image_path) or os.path.exists(image_path_JPG):\n result_image = evaluate(\n image, 512, \n image_path, \n 512, cuda, \n os.path.join(static_file_path, file['filename']))\n # 'file.jpg')\n response = {}\n response['style_image'] = '/static/'+file['filename']\n self.set_header(\"Content-type\", \"image/png\")\n self.write(response)\n else:\n self.write({'Response': 'Image not Found'})\n\n\nclass DatasetHandler(tornado.web.RequestHandler):\n\n def get(self):\n # print(dataset_df.head())\n dataset = {}\n dataset['images'] = []\n # print(dataset_df)\n for index, row in dataset_df.iterrows():\n item = {}\n item['Title'] = row['Title']\n item['Database ID'] = row['Database ID']\n item['Link'] = row['Link']\n item['Drive-Link'] = row['Drive-Link']\n dataset['images'].append(item)\n self.write(dataset)\n\napp = tornado.web.Application([\n (r'/upload', UploadHandler),\n (r'/dataset', DatasetHandler)\n], debug=True, static_path=static_file_path)\n\napp.listen(9001)\ntornado.ioloop.IOLoop.instance().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"579220000","text":"import numpy as np\nfrom collections import defaultdict\nfrom nearest_neighbors import KNNClassifier\n\n\ndef accuracy(y, y_predict):\n return float(np.sum(y == y_predict)) / float(y.shape[0])\n\n\ndef kfold(n, n_folds):\n result = []\n q = n // n_folds\n r = n % n_folds\n left_border = 0\n for idx in range(n_folds):\n chunk_size = q + 1 if idx < r else q\n right_border = left_border + chunk_size\n result.append(\n (\n np.concatenate((np.arange(0, left_border), np.arange(right_border, n))),\n np.arange(left_border, right_border)\n )\n )\n left_border += chunk_size\n return result\n\n\ndef knn_cross_val_score(x, y, k_list, score='accuracy', cv=None, evaluate_time=False, **kwargs):\n result_list = defaultdict(list)\n result_array = dict()\n\n n_classes = np.max(y) + 1\n\n if score == 'accuracy':\n scorer = accuracy\n else:\n raise NotImplementedError\n\n if cv is None:\n cv = kfold(x.shape[0], 3)\n\n use_weights = kwargs['weights'] if 'weights' in kwargs.keys() else True\n classifier = KNNClassifier(\n k=k_list[-1],\n strategy=kwargs['strategy'] if 'strategy' in kwargs.keys() else 'my_own',\n metric=kwargs['metric'] if 'metric' in kwargs.keys() else 'euclidean',\n weights=use_weights,\n test_block_size=kwargs['test_block_size'] if 'test_block_size' in kwargs.keys() else 100\n )\n\n for train_idxs, test_idxs in cv:\n classifier.fit(x[train_idxs], y[train_idxs])\n if use_weights:\n distances, idx_neighbors = classifier.find_kneighbors(x[test_idxs], return_distance=True)\n else:\n idx_neighbors = classifier.find_kneighbors(x[test_idxs], 
return_distance=False)\n\n weights = 1. / (distances + 1e-5) if use_weights else None\n\n class_scores = np.zeros([test_idxs.shape[0], n_classes], dtype=np.float64)\n neighbor_classes = y[train_idxs][idx_neighbors]\n for k in range(k_list[-1]):\n for idx in range(test_idxs.shape[0]):\n class_scores[idx, neighbor_classes[idx, k]] += (weights[idx, k] if use_weights else 1.)\n y_predict = np.argmax(class_scores, axis=1)\n if k + 1 in k_list:\n result_list[k + 1].append(scorer(y[test_idxs], y_predict))\n\n for key in result_list.keys():\n result_array[key] = np.array(result_list[key], dtype=np.float64)\n\n return result_array\n","sub_path":"Tasks/task1/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"559682241","text":"\"\"\"ImageColumnCropOperators module; imported by ImageOperate aggregate class.\"\"\"\n\nfrom PIL import Image\nimport statistics\nimport numpy as np\nimport pandas as pd\n\ndef column_cutter(image_array, cutpoints, iteration, iteration_list):\n \"\"\"Crops full image array on defined crop points.\"\"\"\n\n # Cut array on crop points; add cropped array segments to 'iteration_list'.\n working_image = image_array\n if len(cutpoints) > 0:\n for cut in cutpoints:\n\n column_right = working_image[:, cut:]\n column_left = working_image[:, 0:cut]\n\n if iteration == 'vertical':\n iteration_list.append(column_right)\n\n elif iteration == 'horizontal':\n iteration_list.append(np.rot90(column_right, k=1))\n\n if len(cutpoints) > 1:\n working_image = column_left\n continue\n\n if iteration == 'vertical':\n iteration_list.append(column_left)\n\n elif iteration == 'horizontal':\n iteration_list.append(np.rot90(column_left, k=1))\n\ndef convert_rolling_mean(array, axis, interval, offset):\n \"\"\"Take rolling mean of image array.\"\"\"\n\n if offset == 0:\n axis_rollmean = ((pd.DataFrame(list((array.mean(axis=axis)) / 255)))\n .rolling(interval).mean())\n else:\n axis_rollmean = ((pd.DataFrame(list((array.mean(axis=axis)) / 255)))\n .rolling(interval).mean()).iloc[offset : -(offset)]\n\n axis_rollmean.columns = ['values']\n return axis_rollmean\n\ndef convert_rolling_sdev(array, axis, interval, offset):\n \"\"\"Take rolling sdev of image array.\"\"\"\n\n if offset == 0:\n axis_rollsdev = ((pd.DataFrame(list((array.std(axis=axis)) / 255)))\n .rolling(interval).std())\n else:\n axis_rollsdev = ((pd.DataFrame(list((array.std(axis=axis)) / 255)))\n .rolling(interval).std()).iloc[offset : -(offset)]\n\n axis_rollsdev.columns = ['values']\n return axis_rollsdev\n\ndef list_clean(array, check_list, array_offset, column_offset, under_list_threshold):\n \"\"\"\"\"\"\n\n trimmed_array = array[array_offset:-array_offset, :]\n\n remove_list = []\n for list_item in check_list:\n check_list_range = list(range((list_item - column_offset), (list_item + column_offset), 1))\n\n interference_count = 0\n for index, item in enumerate(trimmed_array.T):\n if index in check_list_range:\n under_list = [value for value in item if value < 5]\n if len(under_list) > under_list_threshold:\n interference_count += 1\n\n if interference_count > 5:\n remove_list.append(list_item)\n\n check_list_out = [value for value in check_list if value not in remove_list]\n check_list_out.sort()\n check_list_out = sorted(check_list_out, reverse=True)\n\n return check_list_out\n\ndef find_cutpoints(array, whiteness_threshold, axis_rollmean, gutter_threshold):\n \"\"\"Iterate through each 
interval open-close pair to find possible cutpoints\"\"\"\n\n whiteness_score = whiteness_threshold\n white_list = [i for i, r in axis_rollmean.loc[axis_rollmean['values']\n > whiteness_score].iterrows()]\n\n cutpoints = []\n white_check_list = []\n list_iterate = 0\n loop_offset = 0\n while list_iterate < len(white_list) - 1:\n sub_check_list = []\n for i, val in enumerate(white_list[:-1]):\n if list_iterate < len(white_list) - 1:\n if white_list[i + loop_offset] + 1 == white_list[i + loop_offset + 1]:\n sub_check_list.append(white_list[i + loop_offset])\n list_iterate += 1\n else:\n loop_offset = list_iterate + 1\n list_iterate += 1\n break\n\n if len(sub_check_list) + 1 > gutter_threshold:\n white_check_list.append(sub_check_list)\n peak_id = int(statistics.median(sub_check_list))\n cutpoints.append(peak_id)\n print(cutpoints)\n return (white_check_list, cutpoints)\n","sub_path":"codebase_archive/moodys-code_stable-version_2018-02-01/ImageColumnCropOperators.py","file_name":"ImageColumnCropOperators.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"311970671","text":"from typing import List\nfrom collections import deque\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\ndef constructBinaryTree(elemList):\n length = len(elemList)\n if(length == 0):\n return None\n _root = TreeNode(elemList[0])\n def recr(root, num):\n # num: number of node, start by 0 (root)\n leftNumber = 2*num+1\n rightNumber = 2*num+2\n if(leftNumber < length and elemList[leftNumber] != None):\n root.left = TreeNode(elemList[leftNumber])\n recr(root.left, leftNumber)\n else:\n root.left = None\n if(rightNumber < length and elemList[rightNumber] != None):\n root.right = TreeNode(elemList[rightNumber])\n recr(root.right, rightNumber)\n else:\n root.right = None\n recr(_root, 0)\n return _root\n\nclass Solution:\n def allPossibleFBT(self, N: int) -> List[TreeNode]:\n if N%2==0: return None\n if N==1:return [TreeNode(0)]\n ans = []\n for i in range(1,N,2):\n for l in self.allPossibleFBT(i):\n for r in self.allPossibleFBT(N-i-1):\n root = TreeNode(0)\n root.left = l\n root.right = r\n ans.append(root)\n return ans\n\n \nsolution = Solution()\nx = solution.allPossibleFBT(5)\nprint(x) ","sub_path":"leetcode894.py","file_name":"leetcode894.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"449163211","text":"import dotenv\nimport io\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\n\nfrom abb.jsonld import set_jsonld_context\nfrom tornado.ioloop import IOLoop\nfrom tornado.options import parse_command_line\nfrom tornado.gen import coroutine, maybe_future\nfrom unicodedata import normalize\n\n\nclass ExecutionFailed(RuntimeError):\n pass\n\n\nclass ChangeWorkingDir(object):\n def __init__(self, new_dir):\n self.new_dir = new_dir\n\n def __enter__(self):\n self.working_dir = os.getcwd()\n logging.debug(\"Changing directory: %s -> %s\", self.working_dir, self.new_dir)\n os.chdir(self.new_dir)\n return self\n\n def __exit__(self, *args):\n logging.debug(\"Changing directory back: %s\", self.working_dir)\n os.chdir(self.working_dir)\n\n\ndef rmdirs(*dirs):\n for path in dirs:\n logging.debug(\"Removing directory: %s\", path)\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef slugify(name, replace_old=\" \", replace_new=\"_\"):\n if not name:\n return \"\"\n 
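# Added note: NFKD decomposition splits accented characters into a base letter plus\n    # combining marks, so the ascii/ignore encode-decode round trip below strips the\n    # diacritics before spaces are replaced, e.g. slugify('crème brûlée') -> 'creme_brulee'.\n    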
name = normalize('NFKD', name)\n    name = name.encode('ascii', 'ignore')\n    name = name.decode(\"ascii\")\n    name = name.replace(replace_old, replace_new)\n    return name\n\n\ndef logprint(obj, name=\"\"):\n    out = io.StringIO()\n    pprint.pprint(obj, stream=out, indent=2)\n    value = out.getvalue().rstrip()\n    if \"\\n\" in value:\n        value = \"\\n%s\" % value\n    text = \"logprint(%s): %s\" % (name, value)\n    return text\n\n\ndef execute(arguments, expect=0, raw=False):\n    logging.debug(\"Executing: %s\" % \" \".join(arguments))\n    p = subprocess.Popen(arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    result, errors = p.communicate()\n\n    if errors:\n        errors = errors.decode(\"utf-8\")\n        for line in errors.splitlines():\n            logging.warning(\"Execution stderr: %s\", line)\n\n    if expect is not None:\n        if isinstance(expect, int):\n            expect = [expect]\n        if p.returncode not in expect:\n            message = \"Command returned with exit code %s\" % p.returncode\n            raise ExecutionFailed(message)\n\n    if not raw:\n        result = result.decode(\"utf-8\")\n\n    return result\n\n\ndef bootstrap(callback, *callback_args):\n    envfile = os.path.join(os.getcwd(), '.env')\n    dotenv.read_dotenv(envfile)\n    argv = parse_command_line()\n    set_jsonld_context()\n\n    @coroutine\n    def runner():\n        args = (argv,) + callback_args\n        if not callback.__closure__:\n            call = coroutine(callback)\n        else:\n            call = callback\n        result = yield maybe_future(call(*args))\n        return result\n\n    if callback is not None:\n        return IOLoop.instance().run_sync(runner)\n\n    return True\n","sub_path":"abb/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"536864672","text":"# encoding:utf-8\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\n\nprint(tf.__version__)\n\nimdb = keras.datasets.imdb\n\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n\n# A dictionary mapping words to integer indices\nword_index = imdb.get_word_index()\n'''\n0 means padding;\n1 marks the start of a sentence\n2 means unknown\n3 means unused\n'''\n\n# Reserve the first indices\nword_index = {k:(v+3) for k,v in word_index.items()}\nword_index[\"<PAD>\"] = 0\nword_index[\"<START>\"] = 1\nword_index[\"<UNK>\"] = 2 # unknown\nword_index[\"<UNUSED>\"] = 3\n\nnum_words = []\nfor i in range(len(train_data)):\n    counter = len(train_data[i])\n    num_words.append(counter)\nprint('total number of words',sum(num_words))\nprint('average length',sum(num_words)/len(num_words))\nprint('maximum length',max(num_words))\nprint('minimum length',min(num_words))\n\n'''\nimport matplotlib.pyplot as plt \nfrom matplotlib.font_manager import FontProperties\nfont = FontProperties(fname=r\"c:\\\\windows\\\\fonts\\\\simsun.ttc\", size=15)\nplt.hist(num_words,50,facecolor='g')\nplt.xlabel('text length',fontproperties=font)\nplt.ylabel('frequency',fontproperties=font)\nplt.axis([0,1200,0,8000])\nplt.show()\nprint(word_index)\n'''\n\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\ndef decode_review(text):\n    return ' '.join([reverse_word_index.get(i, '?') for i in text])\n# print(decode_review(train_data[0]))\ntrain_data=keras.preprocessing.sequence.pad_sequences(train_data,value=word_index[\"<PAD>\"],padding='post',maxlen=256)\ntest_data=keras.preprocessing.sequence.pad_sequences(test_data,value=word_index[\"<PAD>\"],padding='post',maxlen=256)\n\n# Inspect the first padded review\nprint(train_data[0])\n# input shape is the vocabulary count used for the movie reviews(10000 
words)\nvocab_size=10000\n\nmodel=keras.Sequential()\nmodel.add(keras.layers.Embedding(vocab_size,64))\nmodel.add(keras.layers.Bidirectional(keras.layers.LSTM(64,return_sequences=True)))\nmodel.add(keras.layers.Bidirectional(keras.layers.LSTM(64)))\nmodel.add(keras.layers.Dense(64,activation=tf.nn.relu))\nmodel.add(keras.layers.Dense(1,activation=tf.nn.sigmoid))\n\ntf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True,rankdir='TB', dpi=900, expand_nested=True)\nmodel.summary()\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(),\n loss='binary_crossentropy',\n metrics=['accuracy'])\nx_val=train_data[:10000]\npartial_x_train=train_data[10000:]\n\ny_val=train_labels[:10000]\npartial_y_train=train_labels[10000:]\n\n# print(partial_x_train.shape)\n# print(partial_y_train.shape)\nhistory=model.fit(partial_x_train,partial_y_train,epochs=6,batch_size=256,validation_data=(x_val,y_val),verbose=1)\n# results=model.evaluate(test_data,test_labels)\ntf.saved_model.save(model, \"model/BiLSTM2\")\n# print(\"results:\",results)\n# history_dict=history.history\nprint(history.history.keys())\n\nimport matplotlib.pyplot as plt\n\nacc=history.history['accuracy']\nval_acc=history.history['val_accuracy']\nloss=history.history['loss']\nval_loss=history.history['val_loss']\n\nepochs=range(1,len(acc)+1)\n\nplt.plot(epochs,loss,'ro',label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n# plt.savefig('loss.png')\nplt.show()\n\nplt.clf() # clear figure\nacc_values = history.history['accuracy']\nval_acc_values = history.history['val_accuracy']\n\nplt.plot(epochs, acc, 'ro', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n# plt.savefig('acc.png')\nplt.show()\npredictions = model.predict(test_data)\npredictions = (predictions>0.5).astype(int)\n\nfrom sklearn import metrics\nacc = metrics.accuracy_score(test_labels,predictions)\nprint(\"ACC\",acc)\nf1 = metrics.f1_score(test_labels,predictions)\nprint(\"F1\",f1)\nrecall = metrics.recall_score(test_labels,predictions)\nprint(\"recall\",recall)\nprecision= metrics.precision_score(test_labels,predictions)\nprint(\"precision\",precision)\n\n# prediction = model.predict(test_data[0:100])\n# for i in range(100):\n# print(decode_review(test_data[i]))\n# print('预测结果{},标签{}'.format(prediction[i],test_labels[i]))\n\n#\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"480637085","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.signing import BadSignature, Signer\nfrom django.utils.functional import SimpleLazyObject, empty\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom oscar.core.loading import get_class, get_model\n\nfrom .models import FavoriteList\n\nApplicator = get_class('offer.utils', 'Applicator')\nSelector = get_class('partner.strategy', 'Selector')\n\nselector = Selector()\n\n\nclass FavoriteListMiddleware(object):\n\n # Middleware interface methods\n\n def process_request(self, request):\n # Keep track of cookies that need to be deleted (which can only be done\n # when we're processing the response instance).\n request.cookies_to_delete = []\n\n # Load 
stock/price strategy and assign to request (it will later be\n\n request._favlist_cache = None\n\n def load_full_favlist():\n\n favlist = self.get_favlist(request)\n\n return favlist\n\n def load_favlist_hash():\n\n favoritelist = self.get_favlist(request)\n if favoritelist.id:\n return self.get_favlist_hash(favoritelist.id)\n\n\n request.favlist = SimpleLazyObject(load_full_favlist)\n request.favlist_hash = SimpleLazyObject(load_favlist_hash)\n\n def process_response(self, request, response):\n # Delete any surplus cookies\n cookies_to_delete = getattr(request, 'cookies_to_delete', [])\n for cookie_key in cookies_to_delete:\n response.delete_cookie(cookie_key)\n\n if not hasattr(request, 'favlist'):\n return response\n\n if (isinstance(request.favlist, SimpleLazyObject)\n and request.favlist._wrapped is empty):\n return response\n\n cookie_key = self.get_cookie_key(request)\n\n has_favlist_cookie = (\n cookie_key in request.COOKIES\n and cookie_key not in cookies_to_delete)\n\n if (request.favlist.id and not request.user.is_authenticated()\n and not has_favlist_cookie):\n cookie = self.get_favlist_hash(request.favlist.id)\n response.set_cookie(\n cookie_key, cookie,\n max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,\n secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True)\n return response\n\n def get_cookie_key(self, request):\n\n return 'favlist'\n\n def process_template_response(self, request, response):\n if hasattr(response, 'context_data'):\n if response.context_data is None:\n response.context_data = {}\n if 'favlist' not in response.context_data:\n response.context_data['favlist'] = request.favlist\n else:\n response.context_data['favlist'] = request.favlist\n return response\n\n # Helper methods\n\n def get_favlist(self, request):\n\n if request._favlist_cache is not None:\n return request._favlist_cache\n\n manager = FavoriteList.objects\n cookie_key = self.get_cookie_key(request)\n cookie_favlist = self.get_cookie_favlist(cookie_key, request)\n\n if hasattr(request, 'user') and request.user.is_authenticated():\n\n try:\n favlist, __ = manager.get_or_create(owner=request.user)\n except FavoriteList.MultipleObjectsReturned:\n\n old_favlists = list(manager.filter(owner=request.user))\n favlist = old_favlists[0]\n\n\n favlist.owner = request.user\n\n if cookie_favlist:\n request.cookies_to_delete.append(cookie_key)\n\n elif cookie_favlist:\n\n favlist = cookie_favlist\n else:\n favlist = FavoriteList()\n favlist.save()\n\n\n request._favlist_cache = favlist\n\n return favlist\n\n def get_cookie_favlist(self, cookie_key, request):\n\n favlist = None\n if cookie_key in request.COOKIES:\n favlist_hash = request.COOKIES[cookie_key]\n try:\n favlist_id = Signer().unsign(favlist_hash)\n favlist = FavoriteList.objects.get(pk=favlist_id, owner=None)\n\n except (BadSignature, FavoriteList.DoesNotExist):\n request.cookies_to_delete.append(cookie_key)\n return favlist\n\n def get_favlist_hash(self, favlist_id):\n return Signer().sign(favlist_id)\n","sub_path":"favoritelist/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"78126562","text":"\"\"\"...........This file scrape all the product information from the Competitor1 website and store all the data in a database..............\"\"\"\n\nimport header as h\n\n#list of categories for whom data will be scraped from the competitor1 website........\"\"\"\n\ntCategory={'Klänningar','Jackor & Kappor','Toppar & 
T-shirts','Tröjor','Blusar & Skjortor','Kavajer','T-shirts','Jackor','Linnen','Badkläder'}\nbCategory={'Jeans','Leggings','Shorts','Kjolar','Byxor','Jumpsuits','Tights','Underkläder & Sovplagg','Strumpor och tights','Myskläder'}\n\n#Mapping of a retailer size measure scale into international scale size........\"\"\"\n\nwTops={'32':'X','34':'X','36':'S','38':'M','40':'L','42':'X','44':'X','46':'X','48':'X','50':'X','52':'X'}\nintSize={'XXS','XS','S','M','L','XL','2XL','3XL','4XL','5XL','6XL'}\nwintSize={'XXS':'X','XS':'X','S':'S','M':'M','L':'L','XL':'X','2XL':'X','3XL':'X','4XL':'X','5XL':'X','6XL':'X'}\nwBottom={'22':'X','23':'X','24':'X','25':'X','26':'S','27':'S','28':'M','29':'M','30':'L','31':'L','32':'X','33':'X','34':'X','35':'X','36':'X','37':'X'}\nwShoes={'36':'S','37':'S','38':'M','39':'M','40':'L','41':'L','42':'XL','43':'XL'}\n\n#Scraping the product data from the competitor1 website and storing the data in a database\n\n\ndef productInfo(bsObj,url,id):\n    segment=url.split('/')\n    gender=segment[3]\n    brand=segment[4].replace('-',' ')\n    str=bsObj.find('div',{'id':'breadCrumbData'}).get_text()\n    bs4=h.BeautifulSoup(str,\"html.parser\")\n    list=bs4.findAll('a')\n    category=list[0].get_text()\n    subCategory=list[1].get_text()\n    name=list[2].get_text()\n\n    if category in tCategory or subCategory in tCategory:\n        subCategory=category\n        category='Top'\n    elif category in bCategory or subCategory in bCategory:\n        subCategory=category\n        category='Bottom'\n\n\n\n    h.cur.execute(\"INSERT INTO bub_productinfo (id,brand,name,gender,category,subcategory,date) \"\n                  \"VALUES (%s,%s,%s,%s,%s,%s,%s)\",(id,brand,name,gender, category, subCategory, h.tDate))\n    h.cur.connection.commit()\n    return category\n\n#Scraping the product color data from the competitor1 website and storing the data in a database\n\ndef colorInfo(bsObj,url,id,category):\n    try:\n        \"\"\"!!!...Color Information....!!!!\"\"\"\n        list=bsObj.find('ul',{'class':'ulProdInfo'}).findAll('span')\n        colorId=list[1].get_text()\n        if len(list) > 3:\n            color=list[3].get_text()\n        else:\n            color='NULL'\n\n\n        if bsObj.find('div', {'class': 'divProdPriceSale'}) == None:\n            originalPrice = bsObj.find('meta', itemprop='price')['content'].replace(' kr', '')\n            originalPrice=float(''.join(i for i in originalPrice if ord(i)<128))\n            discountPrice = 0.0\n            discountPercentage = 0\n        else:\n            discountPrice = bsObj.find('meta', itemprop='price')['content'].replace(' kr', '')\n            discountPrice=float(''.join(i for i in discountPrice if ord(i)<128))\n            price = bsObj.find('div', {'class': 'divProdPriceInfo'})\n            originalPrice = price.find('span', {'class': 'spanOrdPrice'}).get_text().replace(' kr', '')\n            originalPrice = float(''.join(i for i in originalPrice if ord(i) < 128))\n            discountPercentage = int(price.find('b', {'class': 'txtSale'}).get_text().replace('%', ''))\n\n        h.cur.execute(\n            \"INSERT INTO bub_productcolor (id,colorId,color,pagePath,originalPrice,discountPrice,discountPercentage,date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\",\n            (id, colorId, color, url, originalPrice, discountPrice, discountPercentage, h.tDate))\n        h.cur.connection.commit()\n\n        '''!!!....Size Information....!!!!'''\n\n        list = bsObj.find('select', id='intProductItemId')\n        if list !=None:\n            list=list.findAll('option')\n            list = list[1:]\n            for ele in list:\n                sku=int(ele['value'])\n                size=ele['title'].split(' ')[0]\n\n                # string comparisons below use == rather than identity checks\n                if category == 'Skor':\n                    size = size[0:2]\n                    if size.isdigit() and 35 < int(size) < 44:\n                        size = wShoes[size]\n\n                elif size in intSize:\n                    size = wintSize[size]\n\n                elif size.isdigit() and 21 < int(size) < 38 and category == 'Bottom':\n                    size = wBottom[size]\n\n                elif size.isdigit() and 31 < int(size) < 53 and int(size) % 2 == 0:\n                    size = wTops[size]\n\n                elif size.find('/') != -1:\n                    size = size.split('/')[0]\n                    if size in intSize:\n                        size = wintSize[size]\n                    elif size.isdigit() and 21 < int(size) < 38 and category == 'Bottom':\n                        size = wBottom[size]\n                    elif size.isdigit() and 31 < int(size) < 53 and int(size) % 2 == 0:\n                        size = wTops[size]\n\n                else:\n                    size = size[0:2]\n                    if size in intSize:\n                        size = wintSize[size]\n                    elif size.isdigit() and 21 < int(size) < 38 and category == 'Bottom':\n                        size = wBottom[size]\n                    elif size.isdigit() and 31 < int(size) < 53 and int(size) % 2 == 0:\n                        size = wTops[size]\n\n                quantity = ele['data-stock']\n                if len(quantity) == 0 or int(quantity) == 0:\n                    availability = 'Out of Stock'\n                    quantity=0\n                else:\n                    availability = 'In Stock'\n                    quantity=int(quantity)\n                h.cur.execute(\n                    \"INSERT INTO bub_productsize (colorId,sku,size,availability,quantity,date) VALUES(%s,%s,%s,%s,%s,%s)\",\n                    (colorId, sku, size, availability, quantity, h.tDate))\n                h.cur.connection.commit()\n\n    except Exception as e:\n        print(\"Error:\\t\",e)\n        print(url)\n        input('wait')\n    return\n","sub_path":"WebScraping/bubble2.py","file_name":"bubble2.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"620708684","text":"# Copyright (C) 2015 by\n#    Alessandro Luongo\n#    BSD license.\n#\n# Authors:\n#    Alessandro Luongo \n#\n\"\"\"Functions for computing the harmonic centrality of a graph.\"\"\"\nfrom collections import Counter\nfrom functools import partial\n\nimport networkx as nx\n\n__all__ = ['harmonic_centrality']\n\n\ndef harmonic_centrality(G, u=None, distance=None, normalized=False, reverse=False):\n    r\"\"\"Compute harmonic centrality for nodes.\n\n    Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal\n    of the shortest path distances from all other nodes to `u`\n\n    .. math::\n\n        C(u) = \\sum_{v \\neq u} \\frac{1}{d(v, u)}\n\n    where `d(v, u)` is the shortest-path distance between `v` and `u`.\n\n    Notice that higher values indicate higher centrality.\n\n    Parameters\n    ----------\n    G : graph\n      A NetworkX graph\n\n    u : node, optional\n      Return only the value for node u\n\n    distance : edge attribute key, optional (default=None)\n      Use the specified edge attribute as the edge distance in shortest\n      path calculations. If `None`, then each edge will have distance equal to 1.\n\n    normalized : bool, optional (default=False)\n      If True normalize by the number of nodes in the graph.\n\n    reverse : bool, optional (default=False)\n      If True and G is a digraph, reverse the edges of G, using successors\n      instead of predecessors.\n\n    Returns\n    -------\n    nodes : dictionary\n      Dictionary of nodes with harmonic centrality as the value.\n\n    See Also\n    --------\n    betweenness_centrality, load_centrality, eigenvector_centrality,\n    degree_centrality, closeness_centrality\n\n    Notes\n    -----\n    If the 'distance' keyword is set to an edge attribute key then the\n    shortest-path length will be computed using Dijkstra's algorithm with\n    that edge attribute as the edge weight.\n\n    References\n    ----------\n    .. [1] Boldi, Paolo, and Sebastiano Vigna. 
\"Axioms for centrality.\"\n Internet Mathematics 10.3-4 (2014): 222-262.\n \"\"\"\n if distance is not None:\n # use Dijkstra's algorithm with specified attribute as edge weight \n path_length = partial(nx.single_source_dijkstra_path_length, \n weight=distance, reverse=not(reverse))\n else: # handle either directed or undirected\n if G.is_directed() and not reverse:\n path_length = nx.single_target_shortest_path_length\n else:\n path_length = nx.single_source_shortest_path_length\n\n if u is None:\n nodes = G.nodes()\n else:\n nodes = [u]\n\n if normalized:\n normalize_denominator = float( len(G) - 1 )\n else:\n normalize_denominator = 1\n\n harmonic_centrality = {}\n for n in nodes:\n counters = Counter(dict(path_length(G, n)).values())\n if len(counters) > 1:\n harmonic_centrality[n] = sum([count / float(distance) for distance, count in counters.most_common() if distance != 0]) / normalize_denominator\n else:\n harmonic_centrality[n] = 0.0\n if u is not None:\n return harmonic_centrality[u]\n else:\n return harmonic_centrality\n","sub_path":"networkx/algorithms/centrality/harmonic.py","file_name":"harmonic.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"274270010","text":"import requests\nimport json\nimport time \nimport datetime\n\nkeyAPI = \"\"\nrequestsCounter = 0\noutput = open(\"cleanScriptSelf_output.csv\",\"w\")\noutput.write(\"matchID, team, eventType, timestamp, x, y, killer, victim, assistant\\n\")\n'''\nsummonerName = input(\"Enter your summoner name: \")\nsummmonerProfile = requests.get(\"https://na.api.pvp.net/api/lol/na/v1.4/summoner/by-name/\" + str(summonerName) + \"?api_key=\" + keyAPI).json()\ntime.sleep(1)\nrequestsCounter += 1\n\nsummonerName = summonerName.replace(\" \", \"\")\nprint(summonerName)\nprint(summmonerProfile[str(summonerName)][\"id\"])\nenemyJunglerSummonerId = summmonerProfile[str(summonerName)][\"id\"]\n'''\nenemyJunglerSummonerId = input(\"Enter your summoner id: \")\n#grabs summoner name and converts it into his ID so I can grab his matchlist/history\n#CHANGED TO GRAB SUMMONER ID DUE TO THE POSSBILITY OF FREQUENT NAME CHANGE\n\nenemyJunglerMatchList = requests.get(\"https://na.api.pvp.net/api/lol/na/v2.2/matchlist/by-summoner/\"+ str(enemyJunglerSummonerId) + \"?rankedQueues=RANKED_SOLO_5x5&seasons=SEASON2015&api_key&api_key=\" + keyAPI).json()\nprint(\"Pausing 1 second for enemyJunglerMatchList...\")\ntime.sleep(1)\nrequestsCounter += 1 \n#grabs the matchlist\n\nenemyJunglerChampion = \"unknown\"\nenemyJunglerChampionId = input(\"Enter the champion that you're interested in: \")\nchampionList = requests.get(\"https://global.api.pvp.net/api/lol/static-data/na/v1.2/champion?api_key=\" + keyAPI).json()\nprint(\"Pausing 1 second for championList...\")\ntime.sleep(1)\nrequestsCounter += 1 \n#prompts the targeted champion \n\nfor champion in championList[\"data\"]:\n\tif str(championList[\"data\"][champion][\"id\"]) == enemyJunglerChampionId:\n\t\tenemyJunglerChampionId = championList[\"data\"][champion][\"id\"]\n\t\tprint(\"Found champion: \" + champion)\n\t\n#searches for that champion's name\t\n\n'''\neachMatchChampionId = -1\neachMatchIdList = []\neachMatchChampionIdList = []\n\nfor eachMatch in matchList[\"matches\"]:\t\n#\teachMatchId = eachMatch[\"matchId\"]\n\teachMatchIdList.append(eachMatch[\"matchId\"])\n#\teachMatchChampionId = eachMatch[\"champion\"] \n\teachMatchChampionIdList.append(eachMatch[\"champion\"])\n#\tif (eachMatchChampionId == 
-1):\n#\t\tprint(\"Some error happened and there is no champion found\")\n###### lol, the script worked on accident because matchID is replaced everytime and the champion is also replaced. It will just display the last replacement \t\neachMatchChampionId = eachMatchChampionIdList[4]\nprint(\"eachMatchChampionId is: \" + str(eachMatchChampionIdList[4]))\n#intention: store the matches in a list and look at the selected one\n'''\n'''\nfor i in range(0,matchList[\"totalGames\"]): \n\teachMatchId = matchList[\"matches\"][4][\"matchId\"]\n\teachMatchChampionId = matchList[\"matches\"][4][\"champion\"]\n\tif (eachMatchChampionId == -1):\n\t\tprint(\"Some error happened and there is no champion found\")\nprint(\"eachMatchChampionId is: \" + str(eachMatchChampionId))\nprint(\"Selected eachMatchId is: \" + str(eachMatchId))\n\nmatchData = requests.get(\"https://na.api.pvp.net/api/lol/na/v2.2/match/\" + str(eachMatchId) + \"?includeTimeline=true&api_key=\" + keyAPI).json()\nprint(\"Pausing 1 second for matchData from match: \" + str(eachMatchId))\ntime.sleep(1)\nrequestsCounter += 1\n'''\n'''\nmatchChampionId = -1\nchampionTeamId = -1\nfor matchChampionId in matchData[\"participants\"]:\n\tif eachMatchChampionId == matchChampionId[\"championId\"]:\n\t\tchampionTeamId = matchChampionId[\"teamId\"]\n\t\tprint(championTeamId)\n\t\tprint(eachMatchChampionId)\n#using the championId, find out the team of the summoner, \n\t\t\nenemyTeamId = 100 if (championTeamId == 200) else 200\n#and reverse it\n'''\n'''\nenemyJunglerChampionId = -1\nenemyJunglerParticipantId = -1\nenemyJunglerSummonerId = -1\n\nfor eachParticipant in matchData[\"participants\"]:\n\tif eachParticipant[\"teamId\"] == enemyTeamId:\n\t\tif eachParticipant[\"timeline\"][\"lane\"] == \"JUNGLE\":\n\t\t\tenemyJunglerChampionId = eachParticipant[\"championId\"] \n\t\t\tprint(\"Enemy jungler champion ID: \" + str(enemyJunglerChampionId))\n\t\t\tenemyJunglerParticipantId = eachParticipant[\"participantId\"] \n\t\t\tprint(\"Enemy jungler enemyJunglerParticipantId: \" + str(enemyJunglerParticipantId))\n#after reversing it, find out the enemy champion and enemy participantId\n\t\t\t\nenemyJunglerSummonerId = matchData[\"participantIdentities\"][enemyJunglerParticipantId - 1][\"player\"][\"summonerId\"] \n#so i can find his summonerId\n\nprint(\"enemyJunglerSummonerId: \" + str(enemyJunglerSummonerId))\nenemyJunglerMatchList = requests.get(\"https://na.api.pvp.net/api/lol/na/v2.2/matchlist/by-summoner/\"+ str(enemyJunglerSummonerId) + \"?rankedQueues=RANKED_SOLO_5x5&easons=SEASON2015&api_key&api_key=\" + keyAPI).json()\n#using his summonerId, search through his matchlist/history\n\nprint(\"Pausing 1 second for enemyJunglerMatchList...\")\ntime.sleep(1)\nrequestsCounter += 1\n'''\n\n\nenemyCorrespondingMatchID = []\nfor eachEnemyJunglerMatch in enemyJunglerMatchList[\"matches\"]:\n\tif eachEnemyJunglerMatch[\"matchId\"] not in enemyCorrespondingMatchID and eachEnemyJunglerMatch[\"champion\"] == enemyJunglerChampionId and eachEnemyJunglerMatch[\"lane\"] == \"JUNGLE\": \n\t\tenemyCorrespondingMatchID.append(eachEnemyJunglerMatch[\"matchId\"])\n#for matches that meets the requirements of unique, matching targeted champion id, and the lane is indeed jungle\n\nenemyCorrespondingMatchIdOutputList = []\neventTypeList = []\nplayerSide = []\ntimestampList = [] \nkillerIdList = []\nvictimIdList = [] \nassistantIdList = []\npositionX = []\npositionY = []\n#hit the statistics\ntry:\n\tfor i in range(0,len(enemyCorrespondingMatchID)):\n\t\tif requestsCounter == 
99:\n\t\t\tbreak\n\t\ttry:\n\t\t\tenemyMatchData = requests.get(\"https://na.api.pvp.net/api/lol/na/v2.2/match/\" + str(enemyCorrespondingMatchID[i]) + \"?includeTimeline=true&api_key=\" + keyAPI).json()\n\t\t\tprint(\"Current count: \" + str(requestsCounter) + \" , Pausing 1 second for enemyMatchData... \")\n\t\t\ttime.sleep(1)\n\t\t\trequestsCounter += 1\n\n\t\t\tfor eachEnemyMatchDataParticipant in enemyMatchData[\"participantIdentities\"]:\n\t\t\t\tif enemyJunglerSummonerId == str(eachEnemyMatchDataParticipant[\"player\"][\"summonerId\"]):\n\t\t\t\t\tprint(enemyJunglerSummonerId)\n\t\t\t\t\tenemyMatchDataParticipantId = eachEnemyMatchDataParticipant[\"participantId\"]\n\t\t\t\t\tprint(\"enemyMatchDataParticipantId: \" + str(enemyMatchDataParticipantId))\n\t\t\t\t\n\t\t\tenemyMatchDataFrames = enemyMatchData[\"timeline\"][\"frames\"]\n\t\t\tframeNumber = 0\n\t\t\tfor eachFrame in enemyMatchDataFrames:\n\t\t\t\tif \"position\" in eachFrame[\"participantFrames\"][str(enemyMatchDataParticipantId)]:\n\t\t\t\t\tenemyParticipantFramePosition = eachFrame[\"participantFrames\"][str(enemyMatchDataParticipantId)][\"position\"]\n\t\t\t\t\tenemyCorrespondingMatchIdOutputList.append(enemyCorrespondingMatchID[i])\n\t\t\t\t\teventTypeList.append(\"frame\" + str(frameNumber))\n\t\t\t\t\ttimestampList.append(60000 * frameNumber)\n\t\t\t\t\tkillerIdList.append(\"eachFrame data\")\n\t\t\t\t\tvictimIdList.append(\"eachFrame data\")\n\t\t\t\t\tassistantIdList.append(\"eachFrame data\")\n\t\t\t\t\tpositionX.append(enemyParticipantFramePosition[\"x\"])\n\t\t\t\t\tpositionY.append(enemyParticipantFramePosition[\"y\"])\t\n\t\t\t\t\tif enemyMatchDataParticipantId in range(1, 6):\n\t\t\t\t\t\tplayerSide.append(\"Blue\")\n\t\t\t\t\telse: \n\t\t\t\t\t\tplayerSide.append(\"Red\")\n\t\t\t\t\tframeNumber += 1\n\t\t\t\tif \"events\" in eachFrame:\n\t\t\t\t\teventsList = eachFrame[\"events\"]\n\t\t\t\t\tfor eachEvent in eventsList:\n\t\t\t\t\t\tif \"assistingParticipantIds\" in eachEvent: \n\t\t\t\t\t\t\tif enemyMatchDataParticipantId in eachEvent[\"assistingParticipantIds\"]:\n\t\t\t\t\t\t\t\tenemyCorrespondingMatchIdOutputList.append(enemyCorrespondingMatchID[i])\n\t\t\t\t\t\t\t\teventTypeList.append(eachEvent[\"eventType\"])\n\t\t\t\t\t\t\t\ttimestampList.append(eachEvent[\"timestamp\"])\n\t\t\t\t\t\t\t\tkillerIdList.append(eachEvent[\"killerId\"])\n\t\t\t\t\t\t\t\tif \"victimId\" in eachEvent:\n\t\t\t\t\t\t\t\t\tvictimIdList.append(eachEvent[\"victimId\"])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tvictimIdList.append(\"No victimId found\")\n\t\t\t\t\t\t\t\tassistantIdList.append(eachEvent[\"assistingParticipantIds\"])\n\t\t\t\t\t\t\t\tpositionX.append(eachEvent[\"position\"][\"x\"])\n\t\t\t\t\t\t\t\tpositionY.append(eachEvent[\"position\"][\"y\"])\n\t\t\t\t\t\t\t\tif enemyMatchDataParticipantId in range(1, 6):\n\t\t\t\t\t\t\t\t\tplayerSide.append(\"Blue\")\n\t\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\t\tplayerSide.append(\"Red\")\n\t\t\t\t\t\tif \"position\" in eachEvent: \n\t\t\t\t\t\t\tif \"victimId\" in eachEvent or \"killerId\" in eachEvent:\n\t\t\t\t\t\t\t\tif enemyMatchDataParticipantId == eachEvent[\"killerId\"] or (\"victimId\" in eachEvent and enemyMatchDataParticipantId == eachEvent[\"victimId\"]): ##### for comparison, you have to do specifically 
eachEvent[\"killerId\"]\n\t\t\t\t\t\t\t\t\tenemyCorrespondingMatchIdOutputList.append(enemyCorrespondingMatchID[i])\n\t\t\t\t\t\t\t\t\teventTypeList.append(eachEvent[\"eventType\"])\n\t\t\t\t\t\t\t\t\ttimestampList.append(eachEvent[\"timestamp\"])\n\t\t\t\t\t\t\t\t\tkillerIdList.append(eachEvent[\"killerId\"])\n\t\t\t\t\t\t\t\t\tif \"victimId\" in eachEvent:\n\t\t\t\t\t\t\t\t\t\tvictimIdList.append(eachEvent[\"victimId\"]) \n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tvictimIdList.append(\"No victimId found\")\n\t\t\t\t\t\t\t\t\tassistantIdList.append(\"he cannot be assisting as he is killer or victim\")\n\t\t\t\t\t\t\t\t\tpositionX.append(eachEvent[\"position\"][\"x\"])\n\t\t\t\t\t\t\t\t\tpositionY.append(eachEvent[\"position\"][\"y\"])\n\t\t\t\t\t\t\t\t\tif enemyMatchDataParticipantId in range(1, 6):\n\t\t\t\t\t\t\t\t\t\tplayerSide.append(\"Blue\")\n\t\t\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\t\t\tplayerSide.append(\"Red\")\n\t\t\t\t\t\telif (\"ITEM_PURCHASED\" in eachEvent.values()) and (enemyMatchDataParticipantId == eachEvent[\"participantId\"]): \n\t\t\t\t\t\t\tenemyCorrespondingMatchIdOutputList.append(enemyCorrespondingMatchID[i])\n\t\t\t\t\t\t\teventTypeList.append(eachEvent[\"eventType\"])\n\t\t\t\t\t\t\ttimestampList.append(eachEvent[\"timestamp\"])\n\t\t\t\t\t\t\tkillerIdList.append(\"Item purchased\")\n\t\t\t\t\t\t\tvictimIdList.append(\"Item purchased\")\n\t\t\t\t\t\t\tassistantIdList.append(\"Item purchased\")\n\t\t\t\t\t\t\tif enemyMatchDataParticipantId in range(1, 6):\n\t\t\t\t\t\t\t\tpositionX.append(str(581))\n\t\t\t\t\t\t\t\tpositionY.append(str(561))\n\t\t\t\t\t\t\t\tplayerSide.append(\"Blue\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpositionX.append(str(15000-581))\n\t\t\t\t\t\t\t\tpositionY.append(str(15000-561))\n\t\t\t\t\t\t\t\tplayerSide.append(\"Red\")\n\t\texcept Exception as e: \n\t\t\tprint(type(e).__name__ + \" \" + str(e.args))\n\t\t\tprint(\"probably a jsondecodeerror\")\nexcept KeyboardInterrupt:\n\tprint(\"Caught interrupt\")\nprint(\"Will this print?\")\n\nfor i in range (0,len(eventTypeList)):\n\toutput.write(str(enemyCorrespondingMatchIdOutputList[i]) + \",\"\n\t+ playerSide[i] + \",\"\n\t+ eventTypeList[i] + \",\" \n\t+ str(timestampList[i]) + \",\"\n\t+ str(positionX[i]) + \",\"\n\t+ str(positionY[i]) + \",\"\n\t+ str(killerIdList[i]) + \",\" \n\t+ str(victimIdList[i]) + \",\" \n\t+ str(assistantIdList[i]))\n\toutput.write(\"\\n\")\t","sub_path":"specifiedTarget.py","file_name":"specifiedTarget.py","file_ext":"py","file_size_in_byte":10345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"2047466","text":"\nimport datetime\nimport pytest\nimport secrets\nimport tempfile\n\n\nfrom starfish.agent.memory_agent import MemoryAgent\nfrom starfish.asset.data_asset import DataAsset\nfrom starfish.account import Account\n\n\ndef create_agent(network):\n agent = MemoryAgent(network)\n assert(agent)\n return agent\n\ndef register_asset(agent):\n asset = DataAsset.create('test memory agent asset', secrets.token_bytes(1024))\n assert(asset)\n asset = agent.register_asset(asset)\n assert(asset)\n return asset\n\ndef create_listing(agent, resources, asset):\n listing = agent.create_listing(resources.listing_data, asset.did)\n assert(listing)\n return (listing)\n\ndef purchase_asset(network, resources, config):\n agent = create_agent(network)\n asset = register_asset(agent)\n listing = create_listing(agent, resources, asset)\n\n account = Account(config.accounts[1].as_dict)\n purchase = 
agent.purchase_asset(listing, account)\n return purchase, listing, agent, asset, account\n\ndef test_init(network):\n agent = MemoryAgent(network)\n assert(agent)\n\ndef test_register_asset(network, resources, config):\n agent = create_agent(network)\n asset = register_asset(agent)\n listing = create_listing(agent, resources, asset)\n assert(listing)\n assert(listing.listing_id)\n\n\ndef test_get_listing(network, resources, config):\n agent = create_agent(network)\n asset = register_asset(agent)\n listing = create_listing(agent, resources, asset)\n found_listing = agent.get_listing(listing.listing_id)\n assert(found_listing)\n assert(found_listing.listing_id == listing.listing_id)\n\ndef test_search_listings(network, resources, config):\n agent = create_agent(network)\n asset = register_asset(agent)\n listing = create_listing(agent, resources, asset)\n listing_ids = agent.search_listings(resources.listing_data['author'])\n assert(listing_ids)\n assert(len(listing_ids) > 0)\n is_found = False\n for listing_id in listing_ids:\n if listing_id == listing.listing_id:\n is_found = True\n break\n assert(is_found)\n\ndef test_purchase_asset(network, resources, config):\n agent = create_agent(network)\n asset = register_asset(agent)\n listing = create_listing(agent, resources, asset)\n account = Account(config.accounts[1].as_dict)\n purchase = agent.purchase_asset(listing, account)\n assert(purchase)\n\n\ndef test_is_access_granted_for_asset(network, resources, config):\n purchase, listing, agent, asset, account = purchase_asset(network, resources, config)\n assert(agent.is_access_granted_for_asset(asset, account, purchase.purchase_id,))\n\ndef test_consume_asset(network, resources, config):\n purchase, listing, agent, asset, account = purchase_asset(network, resources, config)\n assert(agent.consume_asset(listing, account, purchase.purchase_id,))\n\n","sub_path":"tests/unit/agent/test_memory_agent.py","file_name":"test_memory_agent.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"335816871","text":"import os\nimport urllib2,cookielib\nimport Cookie\n__author__ = 'Administrator'\n\ndef downUrl(url):\n if url is None:\n return None\n filename=\"filecookie\"\n mcj=cookielib.MozillaCookieJar(filename)\n cookiehand= urllib2.HTTPCookieProcessor(mcj)\n opener=urllib2.build_opener(cookiehand)\n\n headers ={\"Referer\":\"http://jwc.xatu.edu.cn/student/navtree.asp\"}\n req=urllib2.Request(url,None,headers)\n # mcj.load(filename,1,1)\n\n ret=opener.open(req)\n mcj.save(filename,1,1)\n if ret.getcode() != 200:\n return None\n return ret.read()","sub_path":"downUrl.py","file_name":"downUrl.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"213544357","text":"import os\nfrom datetime import datetime, timedelta\n\nimport pytz\nfrom airflow import DAG\nfrom airflow.models import Variable\nfrom airflow.operators.docker_operator import DockerOperator\n\nDAG_NAME = os.path.basename(__file__).replace(\".pyc\", \"\").replace(\".py\", \"\")\n\n## Configuration properties\nch1_ad_event_to_daily_backfill_config = Variable.get('ch1_ad_event_to_daily_backfill_config', deserialize_json=True)\nsupport_emails = ch1_ad_event_to_daily_backfill_config['support_emails']\n\n## DAG definition\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2020, 4, 
27).replace(tzinfo=pytz.timezone('America/Los_Angeles')),\n 'email': support_emails,\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=1)\n}\n\ndag = DAG(DAG_NAME, default_args=default_args, schedule_interval='15 0 * * *', max_active_runs=1, catchup=True,\n params={\n 'clickhouse_hostname1': ch1_ad_event_to_daily_backfill_config['clickhouse_hostname1'],\n 'clickhouse_database1': ch1_ad_event_to_daily_backfill_config['clickhouse_database1']\n })\n\nbackfill_data = DockerOperator(\n task_id='backfill_data',\n command='./ad_event_to_ad_event_daily_backfill.sh {{ params.clickhouse_hostname1 }} {{ params.clickhouse_database1 }} {{ execution_date.in_tz(\\'America/Los_Angeles\\').to_date_string() }}',\n image='airflow.ad.net:5000/ad.net/ch-backfill:latest',\n volumes=['/root/.ssh/etl_rsa:/root/.ssh/id_rsa'],\n dag=dag)\n\nbackfill_data\n","sub_path":"ch1-2-migration/dag/ch1_ad_event_to_daily_backfill.py","file_name":"ch1_ad_event_to_daily_backfill.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"114449589","text":"from sqlalchemy import Column, Integer, String\n\nfrom include.common.db.model.base import Base\n\n\nclass Posts(Base):\n __tablename__ = \"posts\"\n\n post_id = Column(\"post_id\", Integer, primary_key=True)\n topic_id = Column(\"topic_id\", Integer)\n post_content = Column(\"post_content\", String)\n user_id = Column(\"user_id\", Integer)\n post_deleted = Column(\"post_deleted\", Integer, default=0)\n","sub_path":"src/include/common/db/model/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"130931595","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nimport json\n\nfrom habilidades import Habilidades, ListaHabilidades\n\napp = Flask(__name__)\napi = Api(app)\n\n#pre cadastro\ndesenvolvedores = [\n {\n 'id': 0,\n 'nome': 'Rafael',\n 'habilidade': ['Python', 'Flask']\n },\n {\n 'id': 1,\n 'nome': 'Nanderson',\n 'habilidade': ['Python', 'Django']\n }\n]\n\nclass Desenvolvedor(Resource):\n\n def get(self, id):\n try:\n response = desenvolvedores[id]\n except IndexError:\n msg = 'Desenvolvedor de ID {} nao existe'.format(id)\n response = {'status': 'erro', 'mensagem': msg}\n except Exception:\n msg = 'Erro desconhecido, Procure o administrador!'\n response = {'status': 'erro', 'mensagem': msg}\n return response\n\n def put(self, id):\n try:\n dados = json.loads(request.data)\n if dados not in desenvolvedores:\n desenvolvedores[id] = dados\n response = desenvolvedores[id]\n else:\n msg = 'O Desenvolvedor {} já existe'.format(dados)\n response = {'status': 'erro', 'mensagem': msg}\n except IndexError:\n msg = 'Desenvolvedor de ID {} nao existe'.format(id)\n response = {'status': 'erro', 'mensagem': msg}\n except Exception:\n msg = 'Erro desconhecido, Procure o administrador!'\n response = {'status': 'erro', 'mensagem': msg}\n return response\n\n def delete(self, id):\n try:\n desenvolvedores.pop(id)\n response = desenvolvedores[id]\n except IndexError:\n msg = 'Desenvolvedor de ID {} nao existe'.format(id)\n response = {'status': 'erro', 'mensagem': msg}\n except Exception:\n msg = 'Erro desconhecido, Procure o administrador!'\n response = {'status': 'erro', 'mensagem': msg}\n return response\n\nclass ListaDesenvolvedores(Resource):\n\n def get(self):\n return desenvolvedores\n\n 
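# Added note: post() below assigns len(desenvolvedores) as the new id, so ids can\n    # collide after a DELETE removes an element; a database-generated id would avoid that.\n    # Illustrative request (the port assumes Flask's default dev server, not this repo's docs):\n    #   curl -X POST http://localhost:5000/dev/ -d '{\"nome\": \"Ana\", \"habilidade\": [\"Python\"]}'\n    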
def post(self):\n        dados = json.loads(request.data)\n        posicao = len(desenvolvedores)\n        dados['id'] = posicao\n        desenvolvedores.append(dados)\n        return {'status': 'sucesso', 'mensagem': 'Registro inserido'}\n\napi.add_resource(ListaDesenvolvedores, '/dev/')\napi.add_resource(Desenvolvedor, '/dev/<int:id>/')\napi.add_resource(ListaHabilidades, '/habilidades/')\napi.add_resource(Habilidades, '/habilidades/<int:id>/')\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n\n","sub_path":"app_restful.py","file_name":"app_restful.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"425709476","text":"import json\n\nfrom django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ProblemConfig(AppConfig):\n    name = 'froide.problem'\n    verbose_name = _('Problems')\n\n    def ready(self):\n        from froide.account.export import registry\n\n        from . import signals  # noqa\n\n        registry.register(export_user_data)\n\n\ndef export_user_data(user):\n    from .models import ProblemReport\n\n    problems = ProblemReport.objects.filter(\n        user=user\n    ).select_related('message', 'message__request')\n    if not problems:\n        return\n    yield ('problem_reports.json', json.dumps([\n        {\n            'message': pb.message.get_absolute_domain_short_url(),\n            'timestamp': pb.timestamp.isoformat(),\n            'resolved': pb.resolved,\n            'kind': pb.kind,\n            'description': pb.description,\n            'resolution': pb.resolution,\n            'resolution_timestamp': (\n                pb.resolution_timestamp.isoformat()\n                if pb.resolution_timestamp else None\n            ),\n        }\n        for pb in problems]).encode('utf-8')\n    )\n","sub_path":"froide/problem/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"332448663","text":"# Modified from & referencing:\n# https://developers.google.com/gmail/api/quickstart/python\n# https://developers.google.com/gmail/api/auth/scopes\n\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nimport base64\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nfrom email.mime.text import MIMEText\n\ntry:\n    import argparse\n    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n    flags = None\n\ndef create_message(sender, to, subject, message_text):\n    \"\"\"Create a message for an email.\n\n    Args:\n        sender: Email address of the sender.\n        to: Email address of the receiver.\n        subject: The subject of the email message.\n        message_text: The text of the email message.\n\n    Returns:\n        An object containing a base64url encoded email object.\n    \"\"\"\n    message = MIMEText(message_text)\n    message['to'] = to\n    message['from'] = sender\n    message['subject'] = subject\n    return {'raw': base64.urlsafe_b64encode(message.as_string())}\n\n#Once you have created a Message object, you can pass it to the drafts.create method to create a Draft object.\n\ndef create_draft(service, user_id, message_body):\n    \"\"\"Create and insert a draft email. Print the returned draft's message and id.\n\n    Args:\n        service: Authorized Gmail API service instance.\n        user_id: User's email address. The special value \"me\"
The special value \"me\"\n    can be used to indicate the authenticated user.\n    message_body: The body of the email message, including headers.\n\n  Returns:\n    Draft object, including draft id and message meta data.\n  \"\"\"\n  try:\n    message = {'message': message_body}\n    draft = service.users().drafts().create(userId=user_id, body=message).execute()\n\n    print('Draft id: {}\\nDraft message: {}'.format(draft['id'], draft['message']))\n\n    return draft\n  except Exception as error:  # errors.HttpError, error:\n    print('An error occurred: %s' % error)\n    return None\n\ndef send_message(service, user_id, message):\n  \"\"\"Send an email message.\n\n  Args:\n    service: Authorized Gmail API service instance.\n    user_id: User's email address. The special value \"me\"\n    can be used to indicate the authenticated user.\n    message: Message to be sent.\n\n  Returns:\n    Sent Message.\n  \"\"\"\n  try:\n    message = (service.users().messages().send(userId=user_id, body=message)\n               .execute())\n    print('Message Id: %s' % message['id'])\n    return message\n  except Exception as error:  # errors.HttpError, error:\n    print('An error occurred: %s' % error)\n\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/gmail-python-quickstart.json\n# SCOPES = 'https://www.googleapis.com/auth/gmail.send'\n# SCOPES = \"https://mail.google.com/\"\nSCOPES = \"https://www.googleapis.com/auth/gmail.modify\"\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Gmail API Python Quickstart'\n\n\ndef get_credentials():\n  \"\"\"Gets valid user credentials from storage.\n\n  If nothing has been stored, or if the stored credentials are invalid,\n  the OAuth2 flow is completed to obtain the new credentials.\n\n  Returns:\n    Credentials, the obtained credential.\n  \"\"\"\n  home_dir = os.path.expanduser('~')\n  credential_dir = os.path.join(home_dir, '.credentials')\n  if not os.path.exists(credential_dir):\n    os.makedirs(credential_dir)\n  credential_path = os.path.join(credential_dir,\n                                 'gmail-python-quickstart.json')\n\n  store = Storage(credential_path)\n  credentials = store.get()\n  if not credentials or credentials.invalid:\n    flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n    flow.user_agent = APPLICATION_NAME\n    if flags:\n      credentials = tools.run_flow(flow, store, flags)\n    else:  # Needed only for compatibility with Python 2.6\n      credentials = tools.run(flow, store)\n    print('Storing credentials to ' + credential_path)\n  return credentials\n\ndef main():\n  \"\"\"Shows basic usage of the Gmail API.\n\n  Creates a Gmail API service object and outputs a list of label names\n  of the user's Gmail account.\n  \"\"\"\n  credentials = get_credentials()\n  http = credentials.authorize(httplib2.Http())\n  srvc = discovery.build('gmail', 'v1', http=http)\n\n  uid = \"me\"\n  sndr = \"me\"\n  rcvr = \"hoye@ualberta.ca\"\n  subj = \"test\"\n  msg = \"Testing the API.\"\n  msg = create_message(sndr, rcvr, subj, msg)\n  if msg != None:\n    send_message(srvc, uid, msg)\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"424113200","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 21 15:04:34 2019\r\n\r\n@author: Sriharsha Komera\r\n\"\"\"\r\n##Importing required libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Importing the dataset\r\npath='F:\\\\Krish\\\\Logistic 
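main() above only sends mail; a sketch of the matching draft flow, reusing the module's own helpers (the recipient address is a placeholder):

def draft_example():
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('gmail', 'v1', http=http)
    # create_message builds the base64url-encoded body; create_draft stores it unsent
    body = create_message('me', 'someone@example.com', 'draft test', 'Saved, not sent.')
    create_draft(service, 'me', body)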
Regression\\\\Social_Network_Ads.csv'\r\ndataset=pd.read_csv(path)\r\n\r\nX=dataset.iloc[:,[2,3]].values\r\ny=dataset.iloc[:,-1].values\r\n\r\n#splitting the data into train and test\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)\r\n\r\n#Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc=StandardScaler()\r\nX_train=sc.fit_transform(X_train)\r\nX_test=sc.transform(X_test)\r\n\r\n#Creating the Logistic model\r\nfrom sklearn.linear_model import LogisticRegression\r\nclassifier=LogisticRegression(random_state=0)\r\nclassifier.fit(X_train,y_train)\r\n\r\n#predicting\r\ny_pred=classifier.predict(X_test)\r\n\r\n#confusion matrix, accuracy\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\r\ncm=confusion_matrix(y_test,y_pred)\r\naccuracy=accuracy_score(y_test,y_pred)\r\ncr=classification_report(y_test,y_pred)\r\n\r\n#Visualizing the output\r\nfrom matplotlib.colors import ListedColormap\r\nX_set,y_set=X_test,y_test\r\nX1,X2=np.meshgrid(np.arange(start=X_set[:,0].min()-1, stop=X_set[:,0].max()+1,step=0.01),\r\n np.arange(start=X_set[:,0].min()-1, stop=X_set[:,0].max()+1,step=0.01))\r\nplt.contour(X1,X2,classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape),\r\n alpha=0.75, cmap=ListedColormap(('red','green'))) \r\nplt.xlim(X1.min(),X1.max())\r\nplt.ylim(X2.min(),X2.max())\r\nfor i,j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set==j,0],X_set[y_set==j,1],\r\n c=ListedColormap(('red','green'))(i),label=j)\r\nplt.title('Logistic Regression (Test set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"Logistic_reg.py","file_name":"Logistic_reg.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"594011765","text":"# Copyright 2021 Ross Wightman and Zilliz. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nfrom torch import nn\nfrom itertools import repeat\nimport collections.abc\nfrom towhee.trainer.models.layers import trunc_normal_, lecun_normal_\n\ndef window_partition(x, window_size: int):\n \"\"\"\n Args:\n x: (b, h, w, c)\n window_size (int): window size\n Returns:\n windows: (num_windows*B, window_size, window_size, c)\n \"\"\"\n b, h, w, c = x.shape\n x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)\n return windows\n\ndef window_reverse(windows, window_size: int, h: int, w: int):\n \"\"\"\n Args:\n windows: (num_windows*b, window_size, window_size, c)\n window_size (int): Window size\n h (int): Height of image\n w (int): Width of image\n Returns:\n x: (b, h, w, c)\n \"\"\"\n b = int(windows.shape[0] / (h * w / window_size / window_size))\n x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)\n return x\n\n# From PyTorch internals\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, collections.abc.Iterable):\n return x\n return tuple(repeat(x, n))\n return parse\n\ndef init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\n \"\"\" ViT weight initialization\n * When called without n, head_bias, jax_impl args it will behave exactly the same\n as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\n * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl\n \"\"\"\n if isinstance(module, nn.Linear):\n if name.startswith('head'):\n nn.init.zeros_(module.weight)\n nn.init.constant_(module.bias, head_bias)\n elif name.startswith('pre_logits'):\n lecun_normal_(module.weight)\n nn.init.zeros_(module.bias)\n else:\n if jax_impl:\n nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n if 'mlp' in name:\n nn.init.normal_(module.bias, std=1e-6)\n else:\n nn.init.zeros_(module.bias)\n else:\n trunc_normal_(module.weight, std=.02)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif jax_impl and isinstance(module, nn.Conv2d):\n # NOTE conv was left to pytorch default in my original init\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\n nn.init.zeros_(module.bias)\n nn.init.ones_(module.weight)\n\nto_1tuple = _ntuple(1)\nto_2tuple = _ntuple(2)\nto_3tuple = _ntuple(3)\nto_4tuple = _ntuple(4)\nto_ntuple = _ntuple\n","sub_path":"towhee/models/swin_transformer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"16110843","text":"from shapely.geometry import Point, Polygon\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as 
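window_partition and window_reverse are exact inverses whenever the window size divides the spatial dims; a quick round-trip check on random data (only torch is assumed):

import torch

x = torch.randn(2, 8, 8, 3)        # (b, h, w, c)
wins = window_partition(x, 4)      # -> (8, 4, 4, 3): 2 images * 4 windows each
assert torch.equal(window_reverse(wins, 4, 8, 8), x)  # lossless round trip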
np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom pylab import *\nimport datetime\nimport code\nimport pickle\n\ndef getRadius(buffer):\n    lat_degree = 110.54 * 1000\n    lon_degree = 111.32 * 1000\n    lat_radius = buffer / lat_degree\n    lon_radius = buffer / lon_degree\n    radius = max(lat_radius,lon_radius)\n    return radius\n\ndef stretch_data_for_plotting(df, column, max_rad_limit):\n    # This function stretches radiance values and brings them between 0 and 255 for plotting purposes\n    # Inputs: column - column that requires values to be stretched\n    #         max_rad_limit - all radiance values beyond this will be assigned 255\n    df[column]= df[column].apply(lambda x: 255 * np.sqrt(np.clip(x, a_min=None, a_max=max_rad_limit)/max_rad_limit))\n    return df\n\n#converts a dataframe to geodataframe with buffers instead of points.\n#This is used to produce visualizations very similar to NL visualizations available online.\ndef create_geodataframe(df, radius, cap_style, buffered=True):\n    \"\"\"\n    radius - in meters\n    cap_style - 1 for round buffer and 3 for square buffer\n    \"\"\"\n    geom = [Point(x,y) for x,y in zip(df[\"Longitude\"], df[\"Latitude\"])]\n    gdf = gpd.GeoDataFrame(df, geometry=geom, crs={\"init\":\"epsg:4326\"})\n    if buffered == True:\n        distance = getRadius(radius)\n        gdf[\"geometry\"] = gdf[\"geometry\"].apply(lambda x: x.buffer(distance/2, cap_style=cap_style))\n    return gdf\n\ndef plot_geospatial_heatmap_subplots(geo_df, col_name, title, cmap, cmap_type, with_streetmap=False, with_sites=False, add_title=False, ax=None):\n    ax = ax\n\n    if with_streetmap == True:\n        # read Yemen's shapefile and filter out shapes corresponding to regions of interest\n        street_map = gpd.read_file(\"../yemen_shp_files/yem_admbnda_adm2_govyem_mola_20181102.shp\")\n        street_map = street_map[(street_map.ADM1_EN.isin([\"Amanat Al Asimah\",\"Sana'a\"]))]\n        street_map['coords'] = street_map['geometry'].apply(lambda x: x.representative_point().coords[:])\n        street_map['coords'] = [coords[0] for coords in street_map['coords']]\n\n    if with_sites == True:\n        # read damaged structures dataset released by UNITAR\n        sites_city = gpd.read_file(\"./extra_datasets/unitar_unisat_data/Damage_Sites_Sanaa_City_20150910.shp\")\n        sites_airport = gpd.read_file(\"./extra_datasets/unitar_unisat_data/Damage_Sites_Sanaa_Airport_20150515.shp\")\n\n    # piece of code to standardize pixel cmap (Reference: https://stackoverflow.com/questions/28752727/map-values-to-colors-in-matplotlib)\n    lst = np.arange(256)\n    minima = 0\n    maxima = 255\n    norm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)\n    mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n    geo_df[\"mapped_values\"] = geo_df[col_name].apply(lambda x: mapper.to_rgba(x)[0])\n\n    # use legend = True to add colorbar and legend = False to remove colorbar\n    final_plot = geo_df.plot(ax=ax, column=\"mapped_values\", markersize=20, cmap=cmap_type, legend=False)\n\n    if with_streetmap == True:\n        street_map.plot(ax = ax, color = 'white', edgecolor = 'black')\n\n    if with_sites == True:\n        # plot the frames loaded above (they were read into sites_city / sites_airport)\n        sites_city.plot(ax=ax, color=\"black\", markersize=10, marker=\"x\", alpha=0.6)\n        sites_airport.plot(ax=ax, color=\"black\", markersize=10, marker=\"x\", alpha=0.6)\n\n    ax.set_axis_off()\n    if add_title==True:\n        ax.set_title(title)\n\n    return final_plot\n\ndef plot_geospatial_heatmap_with_event_locs(geo_df, col_name, events_data, title, cmap, cmap_type, marker_color, events_data_type, max_stretch=255, needs_colormapping=True, add_title=False, event_locs_included=False, include_colorbar=False, 
with_streetmap=False, ax=None):\n ax = ax\n\n if with_streetmap == True:\n # read Yemen's shapefile and filter out shapes corresponding to regions of interest\n street_map = gpd.read_file(\"../yemen_shp_files/yem_admbnda_adm2_govyem_mola_20181102.shp\")\n street_map = street_map[(street_map.ADM1_EN.isin([\"Amanat Al Asimah\",\"Sana'a\"]))]\n street_map['coords'] = street_map['geometry'].apply(lambda x: x.representative_point().coords[:])\n street_map['coords'] = [coords[0] for coords in street_map['coords']]\n\n if needs_colormapping == True:\n # piece of code to standardize pixel cmap (Reference: https://stackoverflow.com/questions/28752727/map-values-to-colors-in-matplotlib)\n lst = np.arange(max_stretch+1)\n minima = min(lst)\n maxima = max(lst)\n norm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)\n mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n geo_df[\"mapped_values\"] = geo_df[col_name].apply(lambda x: mapper.to_rgba(x)[0])\n else:\n geo_df[\"mapped_values\"] = geo_df[col_name]\n\n # use legend = True to add colorbar and legend = False to remove colorbar\n # add argument legend_kwds={'label': \"Z-score values\"}, to add colorbar label\n final_plot = geo_df.plot(ax=ax, column=\"mapped_values\", markersize=30, cmap=cmap_type, legend=include_colorbar, zorder=0)\n # plt.tick_params(labelsize=200)\n\n #NOTE: For polygons, use facecolor=\"none\" to get transparent fill\n if with_streetmap == True:\n street_map.plot(ax=ax, facecolor=\"none\", edgecolor='black', zorder=1, alpha=0.4)\n xlim = ([geo_df.total_bounds[0], geo_df.total_bounds[2]])\n ylim = ([geo_df.total_bounds[1], geo_df.total_bounds[3]])\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n if event_locs_included == True:\n if events_data_type == \"locations_points\":\n events_data.plot(ax=ax, color=\"yellow\", markersize=100, marker=\"x\", zorder=20)\n elif events_data_type == \"locations_buffered\":\n events_data.plot(ax=ax, facecolor=\"none\", edgecolor=\"yellow\", linewidth=2, zorder=20)\n\n ax.set_axis_off()\n if add_title==True:\n ax.set_title(title)\n\n return final_plot","sub_path":"scripts/yemen_plotting_utils.py","file_name":"yemen_plotting_utils.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"225769454","text":"'''\r\nTransform\r\nPerspective transforms an image\r\n\r\nLast Updated: 2016-May-29\r\nFirst Created: 2016-May-29\r\nPython 2.7\r\nChris\r\n\r\nhttp://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/\r\n'''\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef order_points(pts):\r\n '''\r\n Takes a list of four points specifying the (x, y) co-ordinates of each point of the rectangle.\r\n Returns an ordered list of the four points (top-left, top-right, bot-right, bot-left)\r\n '''\r\n # init coords such as entry is top-left, top-right, bot-right, bot-left\r\n rect = np.zeros((4, 2), dtype = 'float32')\r\n\r\n # top-left smallest sum, bot-right largest sum\r\n s = pts.sum(axis = 1)\r\n rect[0] = pts[np.argmin(s)]\r\n rect[2] = pts[np.argmax(s)]\r\n\r\n # now compute the difference between the points: top-right = largest, bot-left = smallest\r\n\r\n diff = np.diff(pts, axis = 1)\r\n rect[1] = pts[np.argmin(diff)]\r\n rect[3] = pts[np.argmax(diff)]\r\n\r\n return rect\r\n\r\ndef four_point_transform(image, pts):\r\n '''\r\n Takes an image and four cords and returns a transformed image.\r\n '''\r\n # obtain a consistent order of the points and unpack them 
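A minimal sketch of the buffering pipeline above; the frame, column name, and radius are invented for illustration (the module's own pandas/geopandas imports are assumed):

df = pd.DataFrame({"Longitude": [44.2, 44.3], "Latitude": [15.3, 15.4], "rad": [10.0, 250.0]})
df = stretch_data_for_plotting(df, "rad", max_rad_limit=200)  # values now lie in [0, 255]
gdf = create_geodataframe(df, radius=750, cap_style=1)        # 750 m round buffers
print(gdf.geometry.geom_type.unique())                        # -> ['Polygon']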
individually\r\n rect = order_points(pts)\r\n (tl, tr, br, bl) = rect\r\n\r\n # compute the width and height of the new image, which will be the max distance\r\n # between suitable co-ordinates\r\n\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth = max(int(widthA), int(widthB))\r\n\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight = max(int(heightA), int(heightB))\r\n\r\n dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1],\\\r\n [0, maxHeight - 1]], dtype = 'float32')\r\n\r\n # compute the perspective transform matrix and then apply it\r\n M = cv2.getPerspectiveTransform(rect, dst)\r\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\r\n\r\n return warped\r\n\r\ndef transform_example(image_file, cords):\r\n '''\r\n An example implementation of transform.\r\n '''\r\n img = cv2.imread(image_file)\r\n pts = np.array(cords, dtype = 'float32')\r\n\r\n warped = four_point_transform(img, pts)\r\n\r\n cv2.imshow('Original', img)\r\n cv2.imshow('Warped', warped)\r\n cv2.waitKey(0)\r\n\r\n#transform_example('transform_example_1.jpg', [(75, 255), (378, 130), (505, 280), (200, 470)])\r\n","sub_path":"cv2_transform_and_scan/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"434208538","text":"import os\nimport random\n\nfrom cheroot.wsgi import Server as WSGIServer\nfrom cheroot.ssl.builtin import BuiltinSSLAdapter\nfrom cheroot.ssl.pyopenssl import pyOpenSSLAdapter\nfrom database import db\nfrom src import app, Config, init_db_categories_and_services\nfrom src.models.master import Master\nfrom src.models.category import Category\nfrom src.models.service import Service\n\napp.config['SQLALCHEMY_DATABASE_URI'] = Config.SQLALCHEMY_LOCAL_MY_SQL_DB\napp.config['WTF_CSRF_ENABLED'] = False\napp.config['FLASK_DEBUG'] = 0\napp.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = False\ndb.init_app(app)\nwith app.app_context():\n db.create_all()\n if not Category.query.all():\n init_db_categories_and_services()\n if not Master.query.all():\n for i in range(100):\n num = random.randint(1, 16)\n c = Service.query.get(num)\n m = Master(name='test', surname='test', services=[])\n m.services.append(c)\n db.session.add(m)\n db.session.commit()\n db.session.commit()\ntesting_app = app\ntesting_db = db\n\n\nport = int(os.environ.get('PORT', 8100))\nserver = WSGIServer(('0.0.0.0', port), app)\nadapter = pyOpenSSLAdapter(certificate=\"certificate.pem\", private_key=\"certificate_private_key.pem\")\n# server.ssl_adapter = adapter\n\n\nif __name__ == '__main__':\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()\n","sub_path":"Project/load_test_server.py","file_name":"load_test_server.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"242398607","text":"import re\nimport unittest\n\ndef Add(numberStr):\n sum = 0\n numberStr = numberStr.replace('\\n', '')\n delimiterChangeMatch = re.search('^//([^0-9_]*)', numberStr)\n if delimiterChangeMatch:\n delimiters = delimiterChangeMatch.group(1).split(\",\")\n numberStr = numberStr.strip(delimiterChangeMatch.group())\n for delimiter in delimiters:\n numberStr = numberStr.replace(delimiter, ',')\n for 
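order_points is the piece worth sanity-checking in isolation; the quadrilateral below is the one from the commented transform_example call above, shuffled:

pts = np.array([(378, 130), (200, 470), (75, 255), (505, 280)], dtype='float32')
print(order_points(pts))
# -> rows come back ordered top-left (75, 255), top-right (378, 130),
#    bottom-right (505, 280), bottom-left (200, 470)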
number in numberStr.split(\",\"):\n        addend = int(number)\n        if addend > 1000:\n            continue\n        else:\n            sum = sum + addend\n    return sum\n\nclass TestAdd(unittest.TestCase):\n    \n    def test_add(self):\n        toTest = [[\"1,2,5\",8],\n                  [\"1\\n,2,3\",6],\n                  [\"1,\\n2,4\",7], \n                  [\"//;\\n1;3;4\",8], \n                  [\"//$\\n1$2$3\",6],\n                  [\"2,1001\",2], \n                  [\"//***\\n1***2***3\",6], \n                  [\"//$,@\\n1$2@3\",6]]\n        for test in toTest:\n            self.assertEqual(Add(test[0]),test[1]) \n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"string_calculator.py","file_name":"string_calculator.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"364102800","text":"import os\n\n\nclass WarmUp:\n    def __init__(self):\n        self.__message = \"Django Challenge\"\n\n    def ready(self):\n        print(\"---------------------------------\")\n        print(f\"Ready for {self.__message}\")\n        os.system(\"python -m django --version\")\n        print(\"---------------------------------\")\n\n\nuse_me = [\"code\", 1, (\"ready\", {\"there\": \"where\", \"here\": [\n    1, WarmUp(), 3]}, \"study\"), \"girls\"]\n\nobj = use_me[2][1]['here'][1]\nobj.ready()\n","sub_path":"test/warm up/warm_up.py","file_name":"warm_up.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"189975997","text":"# input T-grid model output and extract data from along Thalweg\n# output to file ending in _Thw.nc\nimport numpy as np\nimport netCDF4 as nc\nfrom geopy.distance import great_circle\nfrom sys import argv\nimport re\n\nfname=argv[1]\n\nfname2=fname[:-3]+'_Thw.nc'\nf=nc.Dataset(fname,'r')\nf2=nc.Dataset(fname2,'w')\nfkeys=f.variables.keys()\nfor ik in fkeys:\n    match = re.search(r'depth.',ik)\n    if match:\n        zkey=match.group(0)\nz=f.variables[zkey][:]\nt=f.variables['time_counter'][:]\n\nthw0 = np.loadtxt('/ocean/eolson/MEOPAR/tools/bathymetry/thalweg_working.txt', delimiter=\" \", unpack=False)\nthw2=[tuple((int(k[0]),int(k[1]))) for k in thw0]\n#thw=np.empty(thw0.shape)\n#for ii in range(thw.shape[0]):\n#    for jj in range(thw.shape[1]):\n#        thw[ii,jj]=int(thw0[ii,jj])\n#thw2=[tuple(k) for k in thw]\n\nf2.createDimension('time_counter',None)\nf2.createDimension('deptht',len(f.dimensions['deptht']))\nf2.createDimension('distance',len(thw2))\n\nidist=np.zeros((len(thw2),1))\ncdist=np.zeros((len(thw2),1))\nfor kk in range(0,len(thw2)):\n    jj=thw2[kk][0]\n    ii=thw2[kk][1]\n    lat=f.variables['nav_lat'][jj,ii]\n    lon=f.variables['nav_lon'][jj,ii]\n    if kk==0:\n        idist[kk]=0;\n        cdist[kk]=0;\n    else:\n        jj=thw2[kk][0]\n        ii=thw2[kk][1]\n        idist[kk]=great_circle((lat0,lon0),(lat,lon)).km #km\n        #gsw.distance([lon0,lon],[lat0,lat])/1000 # km\n        cdist[kk]=idist[kk]+cdist[kk-1]\n    lat0=lat\n    lon0=lon\n\nprint('starting loop')\nfor ik in fkeys:\n    if np.size(f.variables[ik].shape) == 4:\n        f2var=f2.createVariable(ik,f.variables[ik].datatype,\n                                ('time_counter','deptht','distance'))\n        print(ik)\n        thwvar=np.empty((1,len(z),len(thw2)))\n        for tt in range(0,len(t)):\n            print(tt)\n            for kk in range(len(thw2)):\n                thwvar[0,:,kk]=f.variables[ik][tt,:,thw2[kk][0],thw2[kk][1]]\n            
f2var[tt,:,:]=thwvar\n\nnew_tc=f2.createVariable('time_counter',float,('time_counter'))\nnew_tc[:]=t\nnew_z=f2.createVariable('deptht',float,('deptht'))\nnew_z[:]=z\nnew_dist=f2.createVariable('distance',float,('distance'))\nnew_dist[:]=cdist\nf2.close()\nf.close()\n","sub_path":"notebooks/extractThalweg_2.py","file_name":"extractThalweg_2.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299595552","text":"# GARStow cache operations\n# Copyright 2010, 2014, 2015 Adam Sampson \n\nimport cPickle as pickle\nimport hashlib\nimport os\n\nfrom garstowlib.config import gar_temp_dir\nfrom garstowlib.utils import atomic_open, mkdir_p\n\n_memory_cache = {}\n\ndef cache_get(category, key, type=str):\n\t\"\"\"Get an item from GARStow's cache, returning (value, mtime).\n\tvalue will be None if the item is not in the cache.\n\n\tcategory and key are byte strings.\n\n\tIf type is str, the item is stored directly as a byte string. If type\n\tis unicode, the item is encoded as UTF-8. Otherwise (e.g. if type is\n\tNone), the item is pickled.\"\"\"\n\n\t# Check whether we've already got it in memory first.\n\tglobal _memory_cache\n\tmemorised = _memory_cache.get(category + \"/\" + key)\n\tif memorised is not None:\n\t\treturn memorised\n\n\t# Nope; check the disk.\n\titem_hash = hashlib.sha1(key).hexdigest()\n\tcache_fn = \"%s/%s-cache/%s\" % (gar_temp_dir, category, item_hash)\n\n\ttry:\n\t\tf = open(cache_fn, \"rb\")\n\texcept IOError:\n\t\treturn (None, 0)\n\n\tmtime = os.fstat(f.fileno()).st_mtime\n\n\tif type is str:\n\t\tvalue = f.read()\n\telif type is unicode:\n\t\tvalue = f.read().decode(\"UTF-8\")\n\telse:\n\t\tvalue = pickle.load(f)\n\tf.close()\n\n\treturn (value, mtime)\n\ndef cache_put(category, key, value, type=str):\n\t\"\"\"Put an item into GARStow's cache.\n\n\tcategory and key are byte strings.\n\n\tIf type is str, the item is stored directly as a byte string. If type\n\tis unicode, the item is encoded as UTF-8. Otherwise (e.g. 
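The thalweg extractor is a command-line script; a hypothetical invocation and readback (the NEMO filename is a placeholder):

# $ python extractThalweg_2.py SalishSea_1h_grid_T.nc
import netCDF4 as nc
out = nc.Dataset('SalishSea_1h_grid_T_Thw.nc')
print(out.variables['distance'][:5])  # cumulative along-thalweg distance, km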
if type is\n\tNone), the item is pickled.\"\"\"\n\n\titem_hash = hashlib.sha1(key).hexdigest()\n\tcache_fn = \"%s/%s-cache/%s\" % (gar_temp_dir, category, item_hash)\n\n\tmkdir_p(os.path.dirname(cache_fn))\n\twith atomic_open(cache_fn, \"wb\") as f:\n\t\tif type is str:\n\t\t\tf.write(value)\n\t\telif type is unicode:\n\t\t\tf.write(value.encode(\"UTF-8\"))\n\t\telse:\n\t\t\tpickle.dump(value, f, pickle.HIGHEST_PROTOCOL)\n\t\tmtime = os.fstat(f.fileno()).st_mtime\n\n\tglobal _memory_cache\n\t_memory_cache[category + \"/\" + key] = (value, mtime)\n","sub_path":"gar.scripts/garstowlib/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"251715271","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pandas as pd\nimport time\n\ndef figsize(scalewidth, ratio = None):\n\tfig_width_pt = 426.79135 # Get this from LaTeX using \\the\\textwidth\n\tin_per_pt = 1.0/72.27 # Convert pt to inch\n\tif ratio is None:\n\t ratio = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (0.61803398875)\n\tfig_width = fig_width_pt*in_per_pt*scalewidth # width in inches\n\tfig_height = fig_width*ratio # height in inches\n\tfig_size = [fig_width,fig_height]\n\treturn fig_size\n\ndef plot():\n\tfig = plt.figure(figsize=figsize(1,0.5))\n\n\tx = np.arange(0.001, 3, 0.01)\n\ty = 4*((x**-12) - (x**-6))\n\n\tplt.plot(x, y)\n\tplt.gca().set_xlabel('$r/\\sigma$')\n\tplt.gca().set_ylabel('$ U_\\mathrm{LJ}/\\\\varepsilon$')\n\n\tplt.gcf().subplots_adjust(bottom=0.15)\n\n\tplt.ylim(-1.5, 5)\n\n\tplt.tight_layout()\n\n\tif __name__ == '__main__':\n\t\tplt.show()\n\n\treturn fig\n\n\nif __name__ == '__main__':\n\tfig = plot()\n\t# fig.savefig('plot.png')\n\n","sub_path":"Figures/plot_MDSLEX2b.py","file_name":"plot_MDSLEX2b.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"542785775","text":"from django.contrib import admin\nfrom .models import Color\n\ndef set_color(modeladmin, request, queryset):\n r,g,b,c = 0,0,0,0\n\n for color in queryset:\n r += getattr(color,'rVal',0)\n g += getattr(color,'gVal',0)\n b += getattr(color,'bVal',0)\n c += 1\n\n r /= c\n g /= c\n b /= c\n\n Color.write_vals(int(r),int(g),int(b))\n set_color.short_description = \"Set Color\"\n\nclass ColorAdmin(admin.ModelAdmin):\n list_display = [\"name\", \"color_preview\", \"rVal\", \"gVal\", \"bVal\"]\n actions = [set_color]\n search_fields = ['name']\n\n\nadmin.site.register(Color, ColorAdmin)","sub_path":"light_control/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"413494118","text":"import findspark\r\nfindspark.init()\r\nfrom pyspark import SparkConf,SparkContext\r\nfrom pyspark.streaming import StreamingContext\r\nfrom pyspark.sql import Row,SQLContext\r\nimport sys\r\nimport requests\r\ndef rc(line):\r\n\r\n t=line.split(\";\")[7]\r\n if ',' not in t:\r\n return [t]\r\n else:\r\n y=t.split(\",\")\r\n return y\r\ndef fab(r):\r\n sr = r.sortBy(lambda x: (-x[1],x[0]))\r\n srr = sr.collect()\r\n c=0\r\n i=0\r\n if(srr!=[]):\r\n while(c!=5):\r\n if(srr[i][0]!=''):\r\n if(c!=4):\r\n print(srr[i][0],end=',')\r\n else:\r\n print(srr[i][0])\r\n c+=1\r\n 
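A hedged usage sketch for the cache_get/cache_put pair above; the category, key, and value are invented:

value, mtime = cache_get('checksums', 'foo-1.0.tar.gz')
if value is None:
	value = 'deadbeef'  # stand-in for a freshly computed result
	cache_put('checksums', 'foo-1.0.tar.gz', value)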
i+=1\r\nconf=SparkConf()\r\nconf.setAppName(\"BigData\")\r\nab=SparkContext(conf=conf)\r\ncc=StreamingContext(ab,int(sys.argv[2]))\r\ncc.checkpoint(\"~/checkpoint_BIGDATA\")\r\nstream=cc.socketTextStream(\"localhost\",9009)\r\nfinalans=stream.window(int(sys.argv[1]),1).flatMap(rc).map(lambda x : (x, 1)).reduceByKey(lambda a,b:int(a)+int(b))\r\nfinalans.foreachRDD(fab)\r\ncc.start()\r\ncc.awaitTermination(60) #instead of 25\r\ncc.stop()\r\n","sub_path":"adminmgr/media/code/A3/task3/BD_0913_0171_1120_0113_97PFXUQ.py","file_name":"BD_0913_0171_1120_0113_97PFXUQ.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"166853471","text":"#!/usr/bin/env python\n\nimport sys\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass ResFile:\n \n def __init__(self):\n self.LOption = False\n self.T = 0\n self.testType = \"\"\n self.splayType = \"\"\n self.setSize = []\n self.averageLookup = []\n \ndef getFileResult(filename):\n \n with open(filename, 'rt') as f:\n content = f.readlines()\n \n fileResult = ResFile()\n \n t1 = content[0].split(\": \")\n fileResult.T = int(t1[1])\n t1 = content[1].split(\": \")\n if t1[1] == \"false\":\n fileResult.LOption = False\n else:\n fileResult.LOption = True\n t1 = content[2].split(\": \")\n \n if t1[1] == \"UNI\":\n fileResult.testType = \"uniform\"\n else:\n fileResult.testType = \"sequential\"\n \n t1 = content[3].split(\": \")\n \n if t1[1] == \"STANDARD\":\n fileResult.splayType = \"standard\"\n else:\n fileResult.splayType = \"naive\"\n \n index = 5\n while index < len(content) - 1:\n t1 = content[index].split(\": \")\n fileResult.setSize.append(int(t1[0]))\n fileResult.averageLookup.append(float(t1[1]))\n index+=1\n \n return fileResult\n \n\ndef task1():\n input_files=[\"splayTest_10_UNI_LFalse_res_STANDARD\",\n \"splayTest_100_UNI_LFalse_res_STANDARD\",\n \"splayTest_1000_UNI_LFalse_res_STANDARD\",\n \"splayTest_10000_UNI_LFalse_res_STANDARD\",\n \"splayTest_100000_UNI_LFalse_res_STANDARD\",\n \"splayTest_1000000_UNI_LFalse_res_STANDARD\"\n ]\n \n\n fig = plt.figure(figsize=(11,6))\n fig.add_axes([0.075, 0.1, 0.75, 0.75])\n ax = plt.gca()\n\n ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.65) \n colors = ['b', 'r', 'm', 'g', 'c', 'k']\n plots = []\n index = 0\n while index < 6:\n fileResult = getFileResult(input_files[index])\n pl, = plt.plot(fileResult.setSize, fileResult.averageLookup, colors[index])\n plots.append(pl)\n index+=1\n\n \n plt.xlabel('Set size')\n plt.ylabel('Average lookup')\n plt.title('Task1: splay = Standard, uniform test, without -l option')\n \n leg = plt.legend(plots[::-1], [\"T = 1 000 000\", \"T = 100 000\", \"T = 10 000\", \"T = 1 000\", \"T = 100\", \"T = 10\"], bbox_to_anchor=(1.01,1), loc=2, borderaxespad=0.5)\n \n \n plt.savefig('../graphs/Task1.png', bbox_inches='tight')\n plt.show()\n \n \n \ndef task2():\n input_files=[\"splayTest_10_UNI_LFalse_res_NAIVE\",\n \"splayTest_100_UNI_LFalse_res_NAIVE\",\n \"splayTest_1000_UNI_LFalse_res_NAIVE\",\n \"splayTest_10000_UNI_LFalse_res_NAIVE\",\n \"splayTest_100000_UNI_LFalse_res_NAIVE\",\n \"splayTest_1000000_UNI_LFalse_res_NAIVE\"\n ]\n fig = plt.figure(figsize=(11,6))\n fig.add_axes([0.075, 0.1, 0.75, 0.75])\n ax = plt.gca()\n\n ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.65) \n\n colors = ['b', 'r', 'm', 'g', 'c', 'k']\n plots = []\n index = 0\n while index < 6:\n fileResult = 
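The streaming job above reads localhost:9009 and takes its window length and batch interval from argv; a hypothetical launch (port is fixed in the code, filenames are placeholders):

# $ nc -lk 9009 < hashtags.csv      # feed the socket the job listens on
# $ spark-submit job.py 30 5        # argv[1] = window seconds, argv[2] = batch seconds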
getFileResult(input_files[index])\n pl, = plt.plot(fileResult.setSize, fileResult.averageLookup, colors[index])\n plots.append(pl)\n index+=1\n\n \n plt.xlabel('Set size')\n plt.ylabel('Average lookup')\n plt.title('Task2: splay = Naive, uniform test, without -l option')\n \n leg = plt.legend(plots[::-1], [\"T = 1 000 000\", \"T = 100 000\", \"T = 10 000\", \"T = 1 000\", \"T = 100\", \"T = 10\"], bbox_to_anchor=(1.01,1), loc=2, borderaxespad=0.5)\n \n \n plt.savefig('../graphs/Task2.png', bbox_inches='tight')\n plt.show()\n \ndef task3():\n input_files=[\"splayTest_100_UNI_LFalse_res_STANDARD\",\n \"splayTest_100_UNI_LFalse_res_NAIVE\",\n \"splayTest_10000_UNI_LFalse_res_STANDARD\",\n \"splayTest_10000_UNI_LFalse_res_NAIVE\",\n \"splayTest_1000000_UNI_LFalse_res_STANDARD\",\n \"splayTest_1000000_UNI_LFalse_res_NAIVE\"\n ]\n fig = plt.figure(figsize=(11,6))\n fig.add_axes([0.075, 0.1, 0.75, 0.75])\n ax = plt.gca()\n\n ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.65) \n\n colors = ['b', 'r', 'm', 'g', 'c', 'k']\n plots = []\n index = 0\n while index < 6:\n fileResult = getFileResult(input_files[index])\n pl, = plt.plot(fileResult.setSize, fileResult.averageLookup, colors[index])\n plots.append(pl)\n index+=1\n\n \n plt.xlabel('Set size')\n plt.ylabel('Average lookup')\n plt.title('Task3: splay = mixed, uniform test, without -l option')\n \n leg = plt.legend(plots[::-1], [\"Naive splay\\nT = 1 000 000\", \"Standard splay\\nT = 1 000 000\", \"Naive splay\\nT = 10 000\", \"Standard splay\\nT = 10 000\", \"Naive splay\\nT = 100\", \"Standard splay\\nT = 100\"], bbox_to_anchor=(1.01,1), loc=2, borderaxespad=0.5)\n \n \n plt.savefig('../graphs/Task3.png', bbox_inches='tight')\n plt.show()\n \ndef task4():\n input_files=[\"splayTest_0_SEQ_LFalse_res_STANDARD\",\n \"splayTest_0_SEQ_LFalse_res_NAIVE\"]\n \n fig = plt.figure(figsize=(11,6))\n fig.add_axes([0.075, 0.1, 0.75, 0.75])\n \n ax = plt.gca()\n ax.set_ylim([0,10])\n\n ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.65) \n\n colors = ['b', 'r']\n plots = []\n index = 0\n while index < 2:\n fileResult = getFileResult(input_files[index])\n #plt.plot(fileResult.setSize, fileResult.averageLookup, colors[index]+'o')\n pl, = plt.plot(fileResult.setSize, fileResult.averageLookup, colors[index])\n plots.append(pl)\n index+=1\n\n \n plt.xlabel('Set size')\n plt.ylabel('Average lookup')\n plt.title('Task4: splay = mixed, sequential test, without -l option')\n \n leg = plt.legend(plots[::-1], [\"Naive splay\", \"Standard splay\"], bbox_to_anchor=(1.01,1), loc=2, borderaxespad=0.5)\n \n \n plt.savefig('../graphs/Task4_closeup.png', bbox_inches='tight')\n plt.show()\n \n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"provide the number of task to be completed\")\n sys.exit()\n \n if sys.argv[1] == \"task1\":\n task1()\n elif sys.argv[1] == \"task2\":\n task2()\n elif sys.argv[1] == \"task3\":\n task3()\n elif sys.argv[1] == \"task4\":\n task4()\n elif sys.argv[1] == \"task5\":\n task5()\n else:\n print(\"wrong task number\")\n sys.exit();\n \n \n \n","sub_path":"C++/Data_Structures_I/SplayTree/python/plotScript.py","file_name":"plotScript.py","file_ext":"py","file_size_in_byte":6580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"15179208","text":"from turtle import Turtle, Screen\ntim = Turtle()\nscreen = Screen()\n\n\n# Create 8 turtles\n# w = forwards, s = backwards, a = counter-clockwise, d = clockwise\n# Turtle draw a 
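getFileResult assumes a fixed layout: four "name: value" header lines, a fifth line it skips, then "size: average" data rows (the final line is also skipped). An illustrative input file; the header field names are guesses, since only the positions and the ": " separator matter to the parser:

# T: 10000
# -l: false
# test type: UNI
# splay type: STANDARD
# ----
# 100: 3.21
# 1000: 4.87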
curve\n# c = clear drawing, then put the turtle back in the center\n# https://docs.python.org/3/library/turtle.html#turtle.dot\n\ndef move_forwards():  # function with no argument\n    tim.forward(10)  # move forward 10 paces\ndef move_backwards():\n    tim.backward(10)  # turtle.Turtle has backward(), not move_backward()\ndef turn_left():\n    new_heading = tim.heading() + 10  # Turn left by 10 degrees each time\n    tim.setheading(new_heading)\n\ndef turn_right():\n    new_heading = tim.heading() - 10  # Turn right by 10 degrees each time\n    tim.setheading(new_heading)\n\ndef clear():\n    tim.clear()  # clear screen after drawing\n    tim.penup()  # clear all the old roads when the turtle is gone\n    tim.home()  # the turtle goes back home at (0,0)\n    tim.pendown()  # ready for next move\n\n\n\n\nscreen.listen()\n#screen.onkey(key =\"space\",fun = move_forwards()) #fun a function with no arguments, key\n# a string: key (e.g. “a”) or key-symbol (e.g. “space”)\n\nscreen.onkey(move_forwards, \"w\")\nscreen.onkey(move_backwards, \"s\")\nscreen.onkey(turn_left, \"a\")\nscreen.onkey(turn_right, \"d\")\nscreen.onkey(clear, \"c\")\n\n\nscreen.exitonclick()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"163716432","text":"# Dependencies\nimport numpy as np\n\n# Data (row=[title, rating])\nbooks = np.array([['Coffe Break Numpy', 4.6],\n                  ['Lord of the rings', 5.0],\n                  ['Harry Potter', 4.3],\n                  ['Winnie the Pooh', 3.9],\n                  ['The clown of god', 2.2], \n                  ['Coffe Break Python', 4.7]])\n\n# One-liner\npredict_betseller = lambda x,y : x[x[:,1].astype(float)>y]\n\n# Results\nprint(predict_betseller(books, 3.9))","sub_path":"predict_betseller.py","file_name":"predict_betseller.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"33850731","text":"import pygame, random, math, sys\r\nfrom pygame.locals import *\r\nfrom pygame.time import Clock\r\n\r\npygame.init()\r\n\r\nBOARD_SIZE = 13\r\n\r\nscreen = pygame.display.set_mode((BOARD_SIZE * 50, BOARD_SIZE * 50 + 100))\r\nmyimage = pygame.image.load(\"assets/board.png\")\r\nimagerect = Rect(0, 0, BOARD_SIZE * 50, BOARD_SIZE * 50 + 100)\r\n\r\nFPS = 60\r\nclock = pygame.time.Clock()\r\n\r\nimages = []\r\nfor imNum in range(9):\r\n    images.append(pygame.image.load(\"assets/\" + str(imNum) + \".png\"))\r\n\r\nfor imStr in [\"mines\",\"notmines\",\"uk\",\"flag\"]:\r\n    images.append(pygame.image.load(\"assets/\" + imStr + \".png\"))\r\n\r\nlines = []\r\nrevealedList = []\r\nhasCascaded = []\r\n\r\ndef clean():\r\n    lines.clear()\r\n    revealedList.clear()\r\n    hasCascaded.clear()\r\n    for times in range(BOARD_SIZE):\r\n        emptyTab = []\r\n        for timesx in range(BOARD_SIZE):\r\n            emptyTab.append(0)\r\n        revealedList.append(emptyTab)\r\n    for x in range(BOARD_SIZE):\r\n        xarray = []\r\n        for y in range(BOARD_SIZE):\r\n            xarray.append(random.choice([10, 10, 10, 10, 9]))\r\n        lines.append(xarray)\r\n    finalClean()\r\n\r\ndef finalClean():\r\n    for l in range(BOARD_SIZE):\r\n        for z in range(BOARD_SIZE):\r\n            minesNear = 0\r\n            if lines[l][z] == 10:\r\n                for r in range(max(0, l - 1), min(BOARD_SIZE - 1, l + 1) + 1):\r\n                    for c in range(max(0, z - 1), min(BOARD_SIZE - 1, z + 1) + 1):\r\n                        if lines[r][c] == 9: minesNear += 1\r\n                lines[l][z] = 8 - minesNear\r\n\r\nclean()\r\n\r\ndef startCascade(l,z):\r\n    for r in range(max(0, l - 1), min(BOARD_SIZE - 1, l + 1) + 1):\r\n        for c in range(max(0, z - 1), min(BOARD_SIZE - 1, z + 1) + 1):\r\n            
revealedList[r][c] = 1\r\n if [r,c] not in hasCascaded:\r\n hasCascaded.append([r,c]) # dev note! almost did .push, too much JS for me\r\n if lines[c][r] == 8:\r\n startCascade(r,c)\r\n\r\ndef gameEnd():\r\n endAtRenderStop = True\r\n for revX in range(BOARD_SIZE):\r\n for revY in range(BOARD_SIZE):\r\n revealedList[revX][revY] = 1\r\n render()\r\n\r\nendAtRenderStop = False\r\ngameOn = True\r\ndef render():\r\n screen.fill([255, 255, 255])\r\n screen.blit(myimage, imagerect)\r\n linestart = 100\r\n for line in lines:\r\n numsquare = 0;\r\n for square in line:\r\n loadImg = images[11]\r\n xpos = math.floor(numsquare / 50)\r\n ypos = math.floor((linestart - 100) / 50)\r\n if revealedList[xpos][ypos] == 1:\r\n loadImg = images[square]\r\n if revealedList[xpos][ypos] == 0.5:\r\n loadImg = images[12]\r\n screen.blit(loadImg, Rect(numsquare, linestart, 50, 50))\r\n numsquare = numsquare + 50\r\n linestart = linestart + 50\r\n ev = pygame.event.get()\r\n for event in ev:\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n sys.exit(0)\r\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n pos = pygame.mouse.get_pos()\r\n posx = pos[0]\r\n posy = pos[1]\r\n if posy > 100:\r\n posy = posy - 100\r\n posx = min(max(math.floor(posx / 50), 0), BOARD_SIZE - 1)\r\n posy = min(max(math.floor(posy / 50), 0), BOARD_SIZE - 1)\r\n revealedList[posx][posy] = 1;\r\n if lines[posy][posx] == 9:\r\n gameEnd()\r\n if lines[posy][posx] == 8:\r\n startCascade(posx,posy)\r\n else:\r\n clean()\r\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 3:\r\n pos = pygame.mouse.get_pos()\r\n posx = pos[0]\r\n posy = pos[1]\r\n if posy > 100:\r\n posy = posy - 100\r\n posx = min(max(math.floor(posx / 50), 0), BOARD_SIZE - 1)\r\n posy = min(max(math.floor(posy / 50), 0), BOARD_SIZE - 1)\r\n if (revealedList[posx][posy] == 0):\r\n revealedList[posx][posy] = 0.5\r\n elif (revealedList[posx][posy] == 0.5):\r\n revealedList[posx][posy] = 0\r\n else:\r\n clean()\r\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_F2:\r\n clean()\r\n pygame.display.flip()\r\n if endAtRenderStop:\r\n gameOn = False\r\n\r\nwhile gameOn:\r\n render()\r\n clock.tick(FPS)\r\n","sub_path":"reversesweeper.py","file_name":"reversesweeper.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"383894575","text":"#! 
/usr/bin/env python\n\nimport time\nfrom MyAStar4 import myAStar4\nfrom ModifyPath import modifyPath\nfrom CostLinear import costLinear\nfrom CreateModel import CreateModel\nfrom PlotSolution import plotSolution\nfrom CalSmoothness import calSmoothness\n\n###################### Path Planning\n# model\nleg_len = 0.4\nrobot={'xyz': [1.2,0,0], 'rpy': [0,0,0]}\nmodel = CreateModel(robot, leg_len)\n\nts=time.time()\n\n# Astar\nAS_obj = myAStar4(model)\nsol = {'x':[], 'y':[], 'robotD':[], 'cost':0.0, 'path_length':0, 'violation':0, 'smoothness':0, 'pTime':0}\nsol['x'] = [x[0] for x in AS_obj.optimal_path]\nsol['y'] = [y[1] for y in AS_obj.optimal_path]\nsol['robotD'] = AS_obj.robotD\n\n# process time\ntf=time.time()\nelapsed_t = tf-ts\nsol['pTime'] = round(elapsed_t, 2)\nprint('process time = %.2f' % elapsed_t)\n\n# Cost\ncostObj = costLinear(model, sol)\nsol = costObj.sol\n\n# smoothness\nsmObj = calSmoothness(sol['x'], sol['y'])\nsol['smoothness']=smObj.smoothness\n\n# sol\nprint(sol)\n\n###################### modify path\n\n# Astar with path modification\nts=time.time()\nmsol = {'x':[], 'y':[], 'robotD':[], 'cost':0.0, 'path_length':0, 'violation':0, 'smoothness':0, 'pTime':0}\nmpObj = modifyPath (model, AS_obj.optimal_path, AS_obj.robotD)\nmsol['x'] = [x[0] for x in mpObj.m_path]\nmsol['y'] = [y[1] for y in mpObj.m_path]\nmsol['robotD'] = mpObj.mrobotD\n\n# process time of m_path\ntf=time.time()\nelapsed_t = tf-ts\nmsol['pTime'] = round(elapsed_t + sol['pTime'], 2)\nprint('process time for m_path = %.2f' % msol['pTime'])\n\n# Cost of m_path\nm_costObj = costLinear(model, msol)\nmsol = m_costObj.sol\n\n# smoothness of m_path\nmsmObj = calSmoothness(msol['x'], msol['y'])\nmsol['smoothness']=msmObj.smoothness\n\n# sol\nprint(msol)\n\n###################### plot solution\nplotSolution(model, sol, msol)\n","sub_path":"src/pp_single/my_astar_p/src/my_astar_p/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"60877749","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2020- Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base classes for Model objects used in Python SDK.\"\"\"\n\nimport os\nfrom collections import deque\nfrom pathlib import Path\n\n\nclass Model(object):\n \"\"\"Abstract response of a single object.\"\"\"\n\n IDENTIFIER_KEY = \"identifier\"\n\n def __init__(self, response=None, client=None, collection=None):\n \"\"\"Create a representation of an object on the server.\"\"\"\n self._response = response if response is not None else {}\n self._client = client\n self._collection = collection\n\n @property\n def id(self):\n \"\"\"The identifier of the object.\"\"\"\n return self._response[self.IDENTIFIER_KEY]\n\n def __str__(self):\n \"\"\"Format model.\"\"\"\n return \"<{0.__class__.__name__} 
'{0.id!s}'>\".format(self)\n\n __repr__ = __str__\n\n\nclass Collection(object):\n \"\"\"Abstract response of multiple objects.\"\"\"\n\n class Meta:\n \"\"\"Store information about the model.\"\"\"\n\n model = None\n \"\"\"Define the type of object this collection represents.\"\"\"\n\n headers = \"id\"\n \"\"\"Which fields to use as headers when printing the collection.\"\"\"\n\n def __init__(self, client=None):\n \"\"\"Create a representation of objects on the server.\"\"\"\n self._client = client\n\n def list(self):\n \"\"\"Return a list if the collection is iterable.\"\"\"\n if not hasattr(self, \"__iter__\"):\n raise NotImplementedError(\"The collection is not iterable.\")\n return list(self)\n\n\nclass LazyResponse(dict):\n \"\"\"Lazy load object properties.\"\"\"\n\n def __init__(self, getter, *args, **kwargs):\n \"\"\"Initialize LazyRequest.\"\"\"\n self._getter = getter\n self._called = False\n super(LazyResponse, self).__init__(*args, **kwargs)\n\n def __getitem__(self, key):\n \"\"\"Implement KeyError check.\"\"\"\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n if not self._called:\n self.update(**self._getter())\n self._called = True\n return dict.__getitem__(self, key)\n raise\n\n\nclass IndexedList(list):\n \"\"\"List allowing to query items by id or by named index.\n\n Example:\n >>> from collections import namedtuple\n >>> Item = namedtuple('Item', 'key, value')\n >>> items = IndexedList(Item('a', 1), Item('b', 2), attr='key')\n >>> items[0].value\n 1\n >>> items['a'].value\n 1\n >>> items.b.value\n 2\n >>> items[0] in items\n True\n >>> 'a' in items\n True\n >>> 'c' not in items\n True\n\n The attribute name must be always defined.\n\n >>> IndexedList()\n Traceback (most recent call last):\n ...\n ValueError: The attribute name must be defined.\n\n \"\"\"\n\n __slots__ = (\"_attr_name\", \"_prefix\")\n\n def __new__(cls, *args, attr=None, prefix=\"\"):\n \"\"\"Call list constructor.\"\"\"\n return super().__new__(cls)\n\n def __init__(self, *args, attr=None, prefix=\"\"):\n \"\"\"Store index information.\"\"\"\n if attr is None:\n raise ValueError(\"The attribute name must be defined.\")\n\n self._attr_name = attr\n self._prefix = prefix\n\n self.extend(args)\n\n def __contains__(self, attr):\n \"\"\"Check existence of attribute value or object itself.\"\"\"\n #: Check if the instance is in the list.\n rval = list.__contains__(self, attr)\n if rval:\n return rval\n\n #: Find item by attribute value.\n try:\n getattr(self, attr)\n return True\n except (AttributeError, TypeError):\n return False\n\n def __getattr__(self, attr):\n \"\"\"Find item by named index.\"\"\"\n attr_name = self._prefix + attr\n for item in self:\n #: Find object by attribute value.\n if getattr(item, self._attr_name) == attr_name:\n return item\n\n #: Return instance attrubutes.\n return list.__getattribute__(self, attr)\n\n def __getitem__(self, index):\n \"\"\"Find item by named index.\"\"\"\n if isinstance(index, int):\n return list.__getitem__(self, index)\n\n try:\n return getattr(self, index)\n except AttributeError:\n raise IndexError(\"No item found with id {0}\".format(self._prefix + index))\n\n\nclass DirectoryTree(dict):\n r\"\"\"Create a safe directory tree from paths.\n\n Example usage:\n\n >>> directory = DirectoryTree()\n >>> directory.add('a/b/c')\n >>> directory.add('a/b/c/d')\n >>> directory.add('x/y/z')\n >>> directory.add('x/y/zz')\n >>> print('\\n'.join(sorted(directory)))\n a/b/c/d\n x/y/z\n x/y/zz\n >>> print('\\n'.join(sorted(directory.get('x/y'))))\n z\n 
zz\n\n \"\"\"\n\n @classmethod\n def from_list(cls, values):\n \"\"\"Construct a tree from a list with paths.\"\"\"\n self = cls()\n for value in values:\n self.add(value)\n return self\n\n def get(self, value, default=None):\n \"\"\"Return a subtree if exists.\"\"\"\n path = value if isinstance(value, Path) else Path(str(value))\n subtree = self\n for part in path.parts:\n try:\n subtree = subtree[part]\n except KeyError:\n return default\n return subtree\n\n def add(self, value):\n \"\"\"Create a safe directory from a value.\"\"\"\n path = value if isinstance(value, Path) else Path(str(value))\n if path and path != path.parent:\n destination = self\n for part in path.parts:\n destination = destination.setdefault(part, DirectoryTree())\n\n def __iter__(self):\n \"\"\"Yield all stored directories.\"\"\"\n filter = {\n os.path.sep,\n }\n queue = deque()\n queue.append((self, []))\n\n while queue:\n data, parents = queue.popleft()\n for key, value in dict.items(data):\n if key in filter:\n continue\n if value:\n queue.append((value, parents + [key]))\n else:\n yield os.path.sep.join(parents + [key])\n","sub_path":"renku/core/models/datastructures.py","file_name":"datastructures.py","file_ext":"py","file_size_in_byte":6839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"186752410","text":"# input has format: ['CHIplay checkers', 'CHIbig drum', 'MOTbig drum']\n# output: list of tuples, in which the first element tags the speaker, \n# and the second contains that speaker's merged turn\ndef merge_speakers(input):\n d = []\n count = 0\n c = ''\n m = ''\n while count < len(input):\n for s, i in enumerate(input): \n if i.startswith('CHI'):\n d.append(m)\n m = ''\n c += str(i)\n elif i.startswith('MOT'):\n d.append(c)\n c = ''\n m += str(i)\n count = count+1\n f = []\n for i in d:\n if i != '':\n f.append(i)\n\n merged = []\n for item in f:\n if item.startswith('CHI'):\n item = item.replace('CHI', '', 1)\n item = item.replace('CHI', ' ')\n pair = ('CHI', item)\n merged.append(pair)\n if item.startswith('MOT'):\n item = item.replace('MOT', '', 1)\n item = item.replace('MOT', ' ')\n pair = ('MOT', item)\n merged.append(pair)\n return(merged)\n","sub_path":"Functions/adj_turn_merge.py","file_name":"adj_turn_merge.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"261594819","text":"from . 
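LazyResponse above defers its getter until a key is actually missing; a small illustration (the getter and payload are invented):

def fetch():
    print('fetching...')
    return {'name': 'demo'}

resp = LazyResponse(fetch, id='42')
print(resp['id'])    # present locally: the getter never runs
print(resp['name'])  # first miss triggers fetch() once, result is merged in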
import *\nfrom app.irsystem.models.helpers import *\nfrom app.irsystem.models.helpers import NumpyEncoder as NumpyEncoder\nimport math\nimport random\nimport os\nimport sys\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom nltk.stem import PorterStemmer\nimport ast\nimport cPickle as pickle\nimport time\nimport numpy as np\nfrom operator import itemgetter\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.decomposition import NMF\nimport scipy.sparse\n\n##\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n@irsystem.route('/favicon.ico')\ndef favicon():\n\treturn send_from_directory(os.path.join(irsystem.root_path, 'static'),\n\t\t\t\t\t\t 'favicon.ico',mimetype='image/vnd.microsoft.icon')\n\nfirst_search = 0\nlocal = ''\nproject_name = \"RecommenTED\"\nnet_id = \"Priyanka Rathnam (pcr43), Minzhi Wang (mw787), Emily Sun (eys27), Lillyan Pan (ldp54), Rachel Kwak (sk2472)\"\n\ntokenizer = TreebankWordTokenizer()\n\nstemmer=PorterStemmer()\n\nstart_time = time.time()\n\nwith open('new_transcripts.pickle', 'rb') as transcript_handle:\n\tprint(\"new_transcripts.pickle --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\ttranscript_url_dict = pickle.load(transcript_handle)\n\nwith open('new_descriptions.pickle', 'rb') as description_handle:\n\tprint(\"new_descriptions --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tdescription_url_dict = pickle.load(description_handle)\n\nwith open('all_talks.pickle') as all_talks_handle:\n\tprint(\"all_talks --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tall_talks = pickle.load(all_talks_handle)\n\nwith open('inv_idx_transcript.pickle', 'rb') as inv_transcript_handle:\n\tprint(\"inv_idx_transcript --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tinv_idx_transcript = pickle.load(inv_transcript_handle)\n\nwith open('inv_idx_description.pickle', 'rb') as inv_description_handle:\n\tprint(\"inv_idx_description --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tinv_idx_description = pickle.load(inv_description_handle)\n\nwith open('idf_transcript.pickle', 'rb') as idf_transcript_handle:\n\tprint(\"idf_transcript --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tidf_transcript = pickle.load(idf_transcript_handle)\n\nwith open('idf_description.pickle', 'rb') as idf_description_handle:\n\tprint(\"idf_description --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tidf_description = pickle.load(idf_description_handle)\n\nwith open('doc_norms_transcript.pickle', 'rb') as doc_norms_transcript_handle:\n\tprint(\"doc_norms_transcript --- %s seconds ---\" % (time.time()-start_time))\n\tstart_time = time.time()\n\tdoc_norms_transcript = pickle.load(doc_norms_transcript_handle)\n\nwith open('doc_norms_description.pickle', 'rb') as doc_norms_description_handle:\n\tprint(\"doc_norms_description --- %s seconds ---\" % (time.time()-start_time))\n\tdoc_norms_description = pickle.load(doc_norms_description_handle)\n\nwith open('100scclusterId_to_tedId2.pickle', 'rb') as clusterId_to_tedId_handle:\n\tprint(\"clusterId_to_tedId2 --- %s seconds ---\" % (time.time()-start_time))\n\tclusterId_to_tedId = pickle.load(clusterId_to_tedId_handle)\n\nwith open('100sctedId_to_clusterId.pickle', 'rb') as tedId_to_clusterId_handle:\n\tprint(\"tedId_to_clusterId2 --- %s seconds ---\" % (time.time()-start_time))\n\ttedId_to_clusterId = 
pickle.load(tedId_to_clusterId_handle)\n\nwith open('topic_dict.pickle', 'rb') as topic_dict_handle:\n\tprint(\"topic_dict --- %s seconds ---\" % (time.time()-start_time))\n\ttopic_dict = pickle.load(topic_dict_handle)\n\nwith open('topic_name_dict.pickle', 'rb') as topic_name_dict_handle:\n\tprint(\"topic_name_dict --- %s seconds ---\" % (time.time()-start_time))\n\ttopic_name_dict = pickle.load(topic_name_dict_handle)\n\nwith open('name_topic_dict.pickle', 'rb') as name_topic_dict_handle:\n\tprint(\"name_topic_dict --- %s seconds ---\" % (time.time()-start_time))\n\tname_topic_dict = pickle.load(name_topic_dict_handle)\n\n#svd_similarity = scipy.sparse.load_npz('sparse_matrix.npz')\n#print(svd_similarity)\n#svd_similarity = [[]]\nsvd_similarity = np.load(\"svd_similarity.npy\")\ndoc_topic_score = np.load(\"doc_topic_score.npy\")\n\ndef compute_score(q, index, idf, doc_norms, q_weights):\n\tresults = np.zeros(len(doc_norms))\n\tfor term in q:\n\t\tpostings = []\n\t\tif term not in index.keys():\n\t\t\tcontinue\n\t\telse:\n\t\t\tpostings = index[term]\n\n\t\tfor doc_id, tf in postings:\n\t\t\twij = tf*idf[term]\n\t\t\twiq = q.count(term)*idf[term]\n\t\t\tq_weights[term] = wiq\n\n\t\t\tresults[doc_id] += wij*wiq\n\n\t# Find query norm\n\tq_norm = 0\n\tfor w in q_weights.values():\n\t\tq_norm += w*w\n\tq_norm = math.sqrt(q_norm)\n\n\t# Normalize\n\treturn results/(doc_norms*q_norm+1)\n\ndef index_search(query, transcript_index, description_index, transcript_idf, description_idf, transcript_doc_norms, description_doc_norms):\n\t# Tokenize query\n\tq = [stemmer.stem(word.decode('utf-8')) for word in tokenizer.tokenize(query.lower())]\n\tq_weights = {}\n\n\ttranscript_scores = compute_score(q, transcript_index, transcript_idf, transcript_doc_norms, q_weights)\n\tdescription_scores = compute_score(q, description_index, description_idf, description_doc_norms, q_weights)\n\n # change results to (score, doc_id) format\n\tresults = [(transcript_scores[i] + description_scores[i], i) for i in range(0, len(transcript_scores))]\n\n\t# sort results by score\n\tresults.sort()\n\n\t# if no relevant videos are found, return randomly sorted list of videos\n\tif results[-1][0] == 0:\n\t\trandom.shuffle(results)\n\n\treturn results[::-1]\n\ndef search_by_author(name, all_talks):\n\ttalks_by_author = []\n\tfor key, value in all_talks.items():\n\t\tif value[\"speaker\"].lower() == name.lower():\n\t\t\ttalks_by_author.append(value)\n\treturn talks_by_author\n\ndef search_by_title(title, all_talks):\n\ttalk_titles = []\n\tfor key, value in all_talks.items():\n\t\tif value['title'].lower() == title.lower():\n\t\t\ttalk_titles.append(value)\n\treturn talk_titles\n\ndef get_docs_from_cluster(target_id, cluster, inv_idx, idf, cluster_len):\n\tsimilarity_list = []\n\tfor doc_id in cluster:\n\t\tif (doc_id != target_id):\n\t\t\tsimilarity_list.append((svd_similarity[target_id, doc_id], doc_id))\n\ttop_docs = []\n\t# Subtract one to remove the target_id\n\tmax_len = min(5, cluster_len - 1)\n\twhile len(top_docs) < max_len:\n\t\tscore, doc_id = max(similarity_list)\n\t\ttop_docs.append(doc_id)\n\t\tsimilarity_list.remove((score, doc_id))\n\t#print([all_talks[doc_id][\"title\"] for doc_id in top_docs])\n\treturn top_docs\n\ndef sortData(data, sort_criteria):\n\tif sort_criteria == \"None\":\n\t\treturn data\n\telif sort_criteria == \"views\":\n\t\tfor talk in data:\n\t\t\ttalk[sort_criteria] = int(talk[sort_criteria])\n\t\tdata = sorted(data, key=itemgetter(sort_criteria), reverse=True)\n\t\treturn data\n\telse:\n\t\tdata 
= sorted(data, key=itemgetter(sort_criteria), reverse=True)\n\t\treturn data\n\n@irsystem.route('/', methods=['GET'])\ndef search():\n\tfirst_search = request.args.get('first_search')\n\toutput_query = request.args.get('output_query')\n\tquery = request.args.get('query')\n\tif first_search:\n\t\toutput_query = first_search\n\t\tquery = output_query\n\tsortBy = request.args.get('sortBy')\n\tif not sortBy:\n\t\tsortBy = 'None'\n\ttopic_search = request.args.get('topic_search')\n\ttopic_output = False\n\tdata = []\n\tsimilar_talks = []\n\tcluster_res = []\n\tauthor_talks = []\n\ttop_topics = []\n\tclus_talks_add = 0\n\t# output_query = ''\n\t# global first_search\n\t# global local\n\n\t# Improve query from topic buttons\n\tif topic_search is not None:\n\t\ttopic_output = True\n\t\ttopic_idx = name_topic_dict[topic_search]\n\t\ttopic_stems = topic_dict[topic_idx]\n\t\tquery = ' '.join(topic_stems) + \" \" + query\n\n\tif first_search is None and output_query is None:\n\t\toutput_message = \"\"\n\telif not first_search and not output_query:\n\t\toutput_message = \"Please enter a valid query.\"\n\telse:\n\t\tauthor_talks = search_by_author(query, all_talks)\n\n\t\ttitle_talks = search_by_title(query, all_talks)\n\n\t\tdata = author_talks + title_talks\n\n\t\ttop_10 = index_search(query, inv_idx_transcript, inv_idx_description, idf_transcript, idf_description, doc_norms_transcript, doc_norms_description)[:10]\n\n\t\tfor score, doc_id in top_10:\n\t\t\tif all_talks[doc_id] not in data and len(data) < 10:\n\t\t\t\tdata.append(all_talks[doc_id])\n\t\t\t\tsimilar_talks.append(all_talks[doc_id])\n\n\n\t\t# Get cluster from top document\n\t\ttop_talk_id = top_10[0][1]\n\t\tcluster_id = tedId_to_clusterId[top_talk_id]\n\t\tcluster_lst = clusterId_to_tedId[cluster_id]\n\t\tcluster_lst_len = len(cluster_lst)\n\n\t\tif cluster_lst_len > 1:\n\t\t\tsimilarity_list = []\n\t\t\ttop_cluster_talks = get_docs_from_cluster(top_talk_id, cluster_lst, inv_idx_transcript, idf_transcript, cluster_lst_len)\n\t\t\t# May be the case that there is less than 5 docs in cluster\n\t\t\tfor doc_id in top_cluster_talks:\n\t\t\t\tif all_talks[doc_id] not in data and all_talks[doc_id] not in top_10:\n\t\t\t\t\tcluster_res.append(all_talks[doc_id])\n\n\t\t# Add Talk Types\n\t\tfor talk in author_talks:\n\t\t\ttalk['type'] = 'author'\n\t\tfor talk in title_talks:\n\t\t\ttalk['type'] = 'title'\n\t\tfor talk in similar_talks:\n\t\t\ttalk['type'] = 'text'\n\t\tfor talk in cluster_res:\n\t\t\ttalk['type'] = 'cluster'\n\n\t\t# User searches by title\n\t\tif len(title_talks) != 0:\n\t\t\t# Not enough results in cluster\n\t\t\tif (5 + len(title_talks) < len(cluster_res)):\n\t\t\t\tsim_talks_add = 10 - len(title_talks) - len(cluster_res)\n\t\t\t\tclus_talks_add = len(cluster_res)\n\t\t\t# Enough results in cluster\n\t\t\telse:\n\t\t\t\tsim_talks_add = min(10 - len(title_talks),5)\n\t\t\t\tclus_talks_add = 10 - len(title_talks) - sim_talks_add\n\t\t\tdata = title_talks + similar_talks[0:sim_talks_add] + cluster_res[0:clus_talks_add]\n\n\t\t# User searches by author\n\t\telif len(author_talks) != 0:\n\t\t\t# Not enough results in cluster\n\t\t\tif (5 + len(author_talks) < len(cluster_res)):\n\t\t\t\tsim_talks_add = 10 - len(author_talks) - len(cluster_res)\n\t\t\t\tclus_talks_add = len(cluster_res)\n\t\t\t# Enough results in cluster\n\t\t\telse:\n\t\t\t\tsim_talks_add = min(10 - len(author_talks),5)\n\t\t\t\tclus_talks_add = 10 - len(author_talks) - sim_talks_add\n\t\t\tdata = author_talks + similar_talks[0:sim_talks_add] + 
cluster_res[0:clus_talks_add]\n\n\t\t# User searches by content\n\t\telse:\n\t\t\tsim_talks_add = 10 - len(cluster_res)\n\t\t\tdata = similar_talks[0:sim_talks_add] + cluster_res\n\n\t\tdata = sortData(data, sortBy)\n\t\t#print(clus_talks_add)\n\n\n\t\t# Topic modeling\n\t\ttop_ids = [doc[1] for doc in top_10[:2]]\n\t\t# row_sum = np.sum(doc_topic_score, axis=1)\n\t\t# normalized = doc_topic_score/row_sum[:, np.newaxis]\n\t\ttopic_lists = np.array([doc_topic_score[i] for i in top_ids])\n\t\t# get indices of top 5 topics\n\t\tidx = np.argpartition(topic_lists, topic_lists.size-5, axis=None)[-5:]\n\t\ttop_xy = [divmod(i, topic_lists.shape[1]) for i in idx]\n\t\ttopics_idx = [i[1] for i in top_xy]\n\n\t\t#topics_idx = np.argsort(doc_topic_score[top_talk_id])[::-1]\n\t\ttop_topics = [topic_name_dict[i] for i in set(topics_idx[:5]) if i in topic_name_dict]\n\n\t\tif top_10[0][0] == 0:\n\t\t\toutput_message = \"No results for \\\"\" + output_query + \"\\\". Here are some suggested videos.\"\n\t\telse:\n\t\t\toutput_message = \"You searched for \\\"\" + output_query + \"\\\".\"\n\n\treturn render_template('search.html', name=project_name, netid=net_id, output_message=output_message, data=data, query=query, sortBy=sortBy, topics=top_topics, output_query=output_query, topic_output=topic_output)\n","sub_path":"app/irsystem/controllers/search_controller.py","file_name":"search_controller.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"152479076","text":"import hashlib\nimport PySimpleGUI as sg\nimport os\nimport re\n\ndef hash(fname, method):\n if method == 'SHA1':\n hash = hashlib.sha1()\n elif method == 'MD5':\n hash = hashlib.md5()\n elif method == 'SHA256':\n hash = hashlib.sha256()\n \n with open(fname) as handle:\n for line in handle:\n hash.update(line.encode(encoding = 'utf-8'))\n return(hash.hexdigest())\n\n\nsg.change_look_and_feel('LightBlue3')\n\nlayout = [\n [sg.Text('File 1: '),\n sg.InputText(),\n sg.FileBrowse(),\n sg.Checkbox('SHA1'),\n sg.Checkbox('MD5')],\n [sg.Text('File 2: '),\n sg.InputText(),\n sg.FileBrowse(),\n sg.Checkbox('SHA256')],\n [sg.Output(size=(80,20))],\n [sg.Submit(), sg.Cancel()]\n]\n\nwindow = sg.Window('Compare Files', layout)\n\nwhile True:\n event, values = window.read()\n if event in (None, 'Exit', 'Cancel'):\n break\n if event=='Submit':\n # print(event, values)\n filepaths = []\n methods = []\n file1 = None\n file2 = None\n valid = None\n if values[0] and values[3]:\n # print(values[0])\n # print(values[3])\n file1 = re.findall('\\/.+\\.+.', values[0])\n file2 = re.findall('\\/.+\\.+.', values[3])\n valid = 1\n if (not file1 and file1 is not None) or not os.path.isfile(values[0]):\n print('Error: Invalid filepath for File 1')\n valid = 0\n elif (not file2 and file2 is not None) or not os.path.isfile(values[3]):\n print('Error: Invalid filepath for File 2')\n valid = 0\n elif not (values[1] or values[2] or values[4]):\n print('Error: No algorithm selected')\n valid = 0\n elif valid == 1:\n print('Info: Valid paths entered')\n if values[1]:\n methods.append('SHA1')\n if values[2]:\n methods.append('MD5')\n if values[4]:\n methods.append('SHA256')\n \n filepaths.append(values[0]) # File 1\n filepaths.append(values[3]) # File 2\n # print(methods)\n # print(filepaths)\n\n for method in methods:\n print(f'>> {method} Comparison')\n print(f'Hash of File 1 is {hash(filepaths[0], method)}')\n print(f'Hash of File 2 is {hash(filepaths[1], method)}')\n if 
hash(filepaths[0], method) == hash(filepaths[1], method):\n print(f'The two files are identical relying on {method} method\\n')\n else:\n print(f'The two files are different relying on {method} method\\n')\n else:\n print('Error: Please choose 2 files')","sub_path":"pysimplegui/GUI_compare_files.py","file_name":"GUI_compare_files.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"124740570","text":"from numpy import zeros\nfrom numpy import subtract\nfrom random import SystemRandom\n\nfrom scipy.optimize import linear_sum_assignment\nfrom rest_framework.exceptions import ValidationError\n\nfrom .models import Work\nfrom .models import Student\nfrom .models import Mentor\nfrom .models import Distribution\nfrom .constants import SCIENCE_MATCH\nfrom .constants import PERSONAL_MATCH\n\n\ndef distribution_auto(work_id, group):\n print(u\"LAUNCH AUTO DISTRIBUTION ON WORK %s GROUP %s\" % (work_id, group))\n\n directions = Work.objects.get(pk=int(work_id)).directions.all()\n students = list(Student.objects.filter(group=group))\n mentors = list(Mentor.objects.all())\n students_count = len(students)\n mentors_count = len(mentors)\n\n if students_count == 0:\n raise ValidationError(detail=u\"В группе %s отсутствуют студенты\" % group)\n if mentors_count == 0:\n raise ValidationError(detail=u\"Отсутствуют руководители\")\n\n if students_count > mentors_count:\n mentors.extend(SystemRandom().sample(mentors * (students_count - mentors_count),\n students_count - mentors_count))\n mentors_count = len(mentors)\n\n mentors_science_preferences = []\n mentors_personal_preferences = []\n for mentor in mentors:\n mentors_science_preferences.append(mentor.science_preferences.all())\n mentors_personal_preferences.append(mentor.personal_preferences.all())\n students_science_preferences = []\n students_personal_preferences = []\n for student in students:\n students_science_preferences.append(student.science_preferences.all())\n students_personal_preferences.append(student.personal_preferences.all())\n\n cost_matrix = zeros((students_count, mentors_count))\n\n for i in range(students_count):\n for j in range(mentors_count):\n match = len(set(students_science_preferences[i]).\n intersection(mentors_science_preferences[j]).\n intersection(directions)) * SCIENCE_MATCH\n if students[i] in mentors_personal_preferences[j]:\n match += PERSONAL_MATCH\n if mentors[j] in students_personal_preferences[i]:\n match += PERSONAL_MATCH\n\n cost_matrix[i][j] = match\n\n cost_matrix = subtract(cost_matrix.max(), cost_matrix)\n students_idx, mentors_idx = linear_sum_assignment(cost_matrix)\n\n for i in students_idx:\n Distribution.objects.update_or_create(work_id=work_id,\n student=students[i],\n defaults={\"mentor\": mentors[mentors_idx[i]]})\n\n print(u\"AUTO DISTRIBUTION DONE\")\n","sub_path":"distribution/distribution_app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"32539370","text":"#!/usr/bin/env python\nimport math\nimport serial\nimport time\nfrom collections import deque\n\n\nser=serial.Serial('/dev/ttyUSB0',4800,timeout=1)\nnot_tracking=[]\nclass Pos():\n\tdef __init__(self):\n\t\tself.latDeg=None\t#int\n\t\tself.lonDeg=None\t#int\n\t\tself.latMin=None\t#dec min\n\t\tself.lonMin=None\t#dec min\n\t\tself.lat=None\t#dec deg\n\t\tself.lon=None\t#dec 
deg\n\t\tself.hour=None\n\t\tself.min=None\n\t\tself.sec=None\n\t\tself.timeset=False\n\t\tself.posfix=False\n\t\tself.altfix=False\n\t\tself.alt=None\t\t#ft\n\t\tself.satcnt=None\n\t\tself.speed=None\t#mph\n\t\tself.course=None\t#deg\n\t\tself.checkT=0\n\t\tself.checkF=0\n\t\tself.timezone=-7\n\t\tself.month=None\n\t\tself.day=None\n\t\tself.year=None\nclass GPS():\n\tdef __init__(self):\n\t\tself.cur_pos=Pos()\n\t\tself.checkT=0\n\t\tself.checkF=0\n\t\tself.ave_list=deque()\n\t\n\t\t\n\tdef check_sum_percent(self):\n\t\tif self.checkT>0:\n\t\t\treturn float(self.checkT)/float(self.checkT+self.checkF)\n\t\treturn 0\n\tdef check_sum(self,line):\n\t\ttry:\n\t\t\te=line.split(\"*\")\n\t\texcept:\n\t\t\t#print(\"No CheckSum\")\n\t\t\tself.checkF=self.checkF+1\n\t\t\treturn False\n\t\tmsg=list(e[0])\n\t\tmsg=msg[1:] # remove the $\n\t\tres=0\n\n\t\tfor c in msg:\n\t\t\tres=res^ ord(c)\n\t\tres=hex(res).upper()\n\t\tcheck=\"\"\n\t\ttry:\n\t\t\t\n\t\t\tcheck=e[1]\n\t\t\tcheck=check[:2]\n\t\t\tcheck=str(check).upper()\n\t\t\tcheck=\"0X\"+check\t\n\t\t\tif res==check:\n\t\t\t\t#print(\"Good\")\t\n\t\t\t\tself.checkT=self.checkT+1\n\t\t\t\treturn True\n\t\texcept:\n\t\t\t#print(\"Failed\")\n\t\t\tself.checkF=self.checkF+1\n\t\t\treturn False\n\t\treturn False\n\tdef feed(self, line):\n\t\t#print(self.check_sum_percent())\n\t\tif self.check_sum(line)==False:\n\t\t\treturn \n\t\t\t\n\t\t#send new line of data to GPS Class\n\t\tif \"GPGGA\" in line:\n\t\t\tself.processGPGGA(line)\n\t\t\treturn\n\t\tif \"GPRMC\" in line:\n\t\t\tself.processGPRMC(line)\n\t\t\treturn\n\t\t#['$GPGLL', '$GPGSA', '$GPRMC', '$GPGSV']\n\t\t#not processing it at this time\n\t\tprint(\"Not Processing this type:\")\n\t\tprint(line)\n\t\te=line.split(',')\n\t\tif e[0] not in not_tracking:\n\t\t\tnot_tracking.append(e[0])\n\tdef processGPRMC(self,line):\n\t\te=line.split(',')\n\t\t#2-data status V-warning\n\t\t#7-speed over ground in knots\n\t\t#8- track made good in degrees true\n\t\t#9 UT date ddmmyy\n\t\t#10 magnetic variation degrees\n\t\t#11 E or west East subtracks from true course\n\t\t#print(\"Processing GPRMC message\")\n\t\tself.cur_pos.speed=float(e[7])*1.15\n\tdef processGPGGA(self,line):\n\t\t#print(line)\n\t\t#$GPGGA,070538.000,4436.9643,N,12304.3958,W,2,08,1.0,89.9,M,-20.7,M,3.8,0000*76\n\t\t# time lat lon fix\n\t\t#print(\"Processing GPGGA message\")\n\t\tself.ave_list.append(self.cur_pos)\n\t\te=line.split(\",\")\n\t\tself.cur_pos.posfix=int(e[6])\n\t\tif self.cur_pos.posfix==0:\n\t\t\treturn\n\t\t#if N= neg?\n\t\tlat=float(e[2])\n\t\tif e[3]=='S':\n\t\t\tlat=lat*-1\n\t\tlon=float(e[4])\n\t\tif e[5]=='W':\n\t\t\tlon=lon*-1\n\t\t#print(lon)\n\t\t#print(int(lon/100))\n\t\tself.cur_pos.latDeg=int(lat/100)\n\t\tself.cur_pos.latMin=lat-(self.cur_pos.latDeg*100)\n\t\tself.cur_pos.lonDeg=int(lon/100)\n\t\tself.cur_pos.lonMin=lon-(self.cur_pos.lonDeg*100)\n\t\tself.cur_pos.latMin=abs(self.cur_pos.latMin)\n\t\tself.cur_pos.lonMin=abs(self.cur_pos.lonMin)\n\t\tself.cur_pos.lat=self.cur_pos.latDeg+self.cur_pos.latMin/60\n\t\tself.cur_pos.lon=self.cur_pos.lonDeg+self.cur_pos.lonMin/60\n\t\tself.cur_pos.alt=float(e[9])\n\t\tself.cur_pos.alt=self.cur_pos.alt*3.28084 #convert to feet\n\t\tt=float(e[1])\n\t\tt=int(t)\n\t\n\t\tself.cur_pos.hour=t/10000\n\t\tself.cur_pos.min=(t-self.cur_pos.hour*10000)/100\n\t\tself.cur_pos.sec=t-self.cur_pos.hour*10000-self.cur_pos.min*100\n\tdef printpos(self):\n\t\tif self.cur_pos.timezone is not None and self.cur_pos.hour is not 
None:\n\t\t\thour=self.cur_pos.hour+self.cur_pos.timezone\n\t\t\tif hour>23:\n\t\t\t\thour=hour-24\n\t\t\tif hour<0:\n\t\t\t\thour=hour+24\n\t\telse:\n\t\t\thour=self.cur_pos.hour\n\t\tif self.cur_pos.posfix==0:\n\t\t\tstatus=\"FAIL\"\n\t\telse:\n\t\t\tstatus=\"GOOD\"\n\t\tprint(\"*********************\")\n\t\tprint(str(hour)+\":\"+str(self.cur_pos.min)+\":\"+str(self.cur_pos.sec))\n\t\tprint(\"Status: \"+status)\n\t\tprint(\"Lat : \"+str(self.cur_pos.latDeg)+\" \"+str(self.cur_pos.latMin))\n\t\tprint(\"Lon : \"+str(self.cur_pos.lonDeg)+\" \"+str(self.cur_pos.lonMin))\n\t\tprint(\"Speed : \"+str(self.cur_pos.speed))\n\t\tprint(\"Alt : \"+str(self.cur_pos.alt))\n\t\tif self.checkT>0:\n\t\t\tprint(\"Sum : \"+str(float(self.checkT)/float(self.checkT+self.checkF)))\n\t\telse:\n\t\t\tprint(\"No Check Sum Yet\")\n\t\tprint(\"Len ave: \"+str(len(self.ave_list)))\n\t\tprint(\"*********************\")\n\t\tprint(\"\\n\")\n\ndef dist(s,e):\n\ttry:\n\t\tsLat=math.radians(s.lat)\n\t\tsLon=math.radians(s.lon)\n\t\teLat=math.radians(e.lat)\n\t\teLon=math.radians(e.lon)\n\texcept:\n\t\tprint(\"invalid position variables\")\n\t\treturn\n\tlatD=math.radians(e.lat-s.lat)\n\tlonD=math.radians(e.lon-s.lon)\n\tR=6371000\n\ta=math.sin(latD/2)*math.sin(latD/2)+math.cos(sLat)*math.cos(eLat)*math.sin(lonD/2)*math.sin(lonD/2)\n\tc=2*math.atan2(math.sqrt(a),math.sqrt(1-a))\n\td=R*c\n\tft= d*3.28084 #in feet\n\treturn ft\n\ndef bearing(s,e):\n\ttry:\n\t\tsLat=math.radians(s.lat)\n\t\tsLon=math.radians(s.lon)\n\t\teLat=math.radians(e.lat)\n\t\teLon=math.radians(e.lon)\n\texcept:\n\t\tprint(\"invalid position variables\")\n\t\treturn\n\ty=math.sin(eLon-sLon)*math.cos(eLat)\n\tx=math.cos(sLat)*math.sin(eLat)-math.sin(sLat)*math.cos(eLat)*math.cos(eLon-sLon)\n\tbrg=math.atan2(y,x)\n\tbrg=math.degrees(brg)\n\treturn brg\n\n\ndef main():\n\t#ser=findGPS()\n\t#write this!!!\n\tgps=GPS()\n\twhile True:\n\t\t#print(not_tracking)\n\t\tline=ser.readline()\n\t\tif len(line)>0:\n\t\t\tgps.feed(line)\n\t\t#time.sleep(.5)\n\t\tgps.printpos()\n\t\n\nif __name__==\"__main__\":\n\tmain()\n\n\n","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"215093840","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 8 21:17:37 2018\n\n@author: MagicAnn\n\"\"\"\n\nclass Node():\n#创建环形链表的各个节点 \n def __init__(self,value,next=None):\n self.value=value\n self.next=next\n \ndef createLink(num):\n#根据节点创建环形链表\n root=Node(1)\n temp=root\n for i in range(num-1):\n temp.next=Node(i+2)\n temp=temp.next\n temp.next=root\n return root\n \ndef josephus(num,k):\n if(num==1):\n print(\"survive:\",num)\n return\n root=createLink(num)\n temp=root\n \n while(1):\n for i in range(k-2): #循环k-2次\n temp=temp.next \n i+=1\n print(\"kill\",temp.next.value)\n temp.next=temp.next.next #相当于跳过了下一个,也就是kill掉的node\n temp=temp.next\n if(temp.next==temp):\n break\n \n print(\"survive\",temp.value)\n \na=int(input(\"请输入人数:\"))\nb=int(input(\"kill number:\"))\njosephus(a,b) \n \n \n \n ","sub_path":"sl_josephus.py","file_name":"sl_josephus.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"497807340","text":"import uproot\nimport numpy as np\nimport pandas as pd\nimport tqdm\n\nimport itertools\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader, Sampler\nimport 
itertools\nimport math\nfrom torch.nn import init\n\nimport dgl\n\nfrom dgl import DGLGraph\n\nfrom dgl.nn.pytorch import KNNGraph\n\nimport dgl.function as fn\nfrom dgl.base import DGLError\n\nfrom dgl import backend as F\n\nimport h5py\n\nfrom dgl.nn.pytorch import GraphConv\n\n\ndev = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# ------------ build the geometry for dataset ------------------ #\nX0, L_int = 3.9, 17.4\n\nZ_Val = [ (3*X0/2), (3*X0 + 16*X0/2), (3*X0 + 16*X0 + 6*X0/2), (3*X0 + 16*X0 + 6*X0 + 1 + 1.5*L_int/2),\n (3*X0 + 16*X0 + 6*X0 + 1 + 1.5*L_int + 4.1*L_int/2), \n (3*X0 + 16*X0 + 6*X0 + 1 + 1.5*L_int + 4.1*L_int + 1.8*L_int/2)\n ]\n\ngraph_size = 10 # --- K value of KNN graph\n\nlayer_size = [\n64,\n32,\n32,\n16,\n16,\n 8,]\n\nscale_factors = {layer_i: int(64/lsize)*int(64/lsize) for layer_i,lsize in enumerate(layer_size)}\n\ndef goes_to(l,x,y,factor):\n\n xs = [factor*x+i for i in range(factor)]\n ys = [factor*y+i for i in range(factor)]\n \n\n goes_to = np.array(list(itertools.product(xs,ys)))\n ls = l*np.ones(len(goes_to))\n return np.column_stack([ls,goes_to]).astype(int)\n\n\n\ngoes_to_dict = {}\n\n\nfor layer_i in range(6):\n\n layer_scale = int( 64/layer_size[layer_i] )\n #print(layer_scale)\n N = layer_size[layer_i]\n\n for cell_x in range(N):\n for cell_y in range(N):\n goes_to_dict[(layer_i,cell_x,cell_y)] = goes_to(layer_i,cell_x,cell_y,layer_scale)\n\n\n# --------------------- function to map cell index to abs value --------------- #\ndef Get_XYZ_val(z_idx, idx, idy, highres=False) : \n \n if(highres) : \n gran = layer_size[0]\n else : \n gran = layer_size[z_idx]\n \n x_val = idx / float(gran) * 125 - 125./2\n y_val = idy / float(gran) * 125 - 125./2\n z_val = Z_Val[z_idx] \n \n return [x_val, y_val, z_val]\n\n# ---------------------- define the custom dataset --------------------------------- #\n\nclass SuperResDataset(Dataset):\n \n def __init__(self, filename, ndata=-1):\n\n self.ndata = ndata\n \n self.file = h5py.File(filename,'r')\n \n self.evnt_sizes = self.file['Event_size'][:]\n \n self.evnt_size_highres = self.file['Event_size_HighRes'][:]\n \n self.cumsum = np.cumsum( self.evnt_sizes )\n \n self.cumsum_highres = np.cumsum( self.evnt_size_highres )\n \n #self.broadcast_factors = [scale_factors[l_i] for l_i in self.cell_layers]\n \n def __len__(self):\n\n if(self.ndata == -1) :\n return len(self.evnt_sizes) \n else : \n return self.ndata\n \n \n def __getitem__(self, idx):\n \n if idx == 0:\n start = 0\n start_hr = 0\n else:\n start = self.cumsum[idx-1]\n start_hr = self.cumsum_highres[idx-1]\n \n end = self.cumsum[idx]\n end_hr = self.cumsum_highres[idx]\n \n# print('start : ', start)\n# print('end : ', end)\n \n# print('start_hr : ', start_hr)\n# print('end_hr : ', end_hr)\n \n cell_xyz = self.file['CellXYLayer'][start:end]\n energies = self.file['TotalEnergy'][start:end]\n neu_energies = self.file['NeutralEnergy'][start:end]\n \n \n cell_xyz_highres = np.concatenate([goes_to_dict[(l,x,y)] for l,x,y in cell_xyz if l < 3])\n energies_highres = self.file['TotalEnergy_HighRes'][start_hr:end_hr]\n neu_energies_highres = self.file['NeutralEnergy_HighRes'][start_hr:end_hr]\n \n cell_xyz_val = np.array([ Get_XYZ_val(idx[0], idx[1], idx[2]) for idx in cell_xyz ])\n cell_xyz_val = np.reshape( cell_xyz_val, (1, cell_xyz_val.shape[0], cell_xyz_val.shape[1]) )\n cell_xyz_val = torch.FloatTensor(cell_xyz_val)\n \n cell_xyz_val_hr = np.array([ Get_XYZ_val(idx[0], idx[1], idx[2], highres=True) for idx in cell_xyz_highres ])\n cell_xyz_val_hr 
= np.reshape( cell_xyz_val_hr, (1, cell_xyz_val_hr.shape[0], cell_xyz_val_hr.shape[1]) )\n cell_xyz_val_hr = torch.FloatTensor(cell_xyz_val_hr)\n \n \n cell_layers = cell_xyz[:,0]\n #b_factors = [scale_factors[l_i] for l_i in cell_layers]\n \n b_factors = []\n for l_i in cell_layers : \n if(l_i < 3) : \n b_factors.append( scale_factors[l_i] ) \n else : \n b_factors.append( 0 )\n \n \n graph = KNNGraph(graph_size)\n \n g = graph(cell_xyz_val)\n g = dgl.transform.remove_self_loop(g)\n g.ndata['energy'] = torch.reshape(torch.FloatTensor(energies), ( torch.FloatTensor(energies).shape[0],1 ) )\n g.ndata['broadcast'] = torch.tensor(b_factors)\n \n g.ndata['parent_node'] = g.number_of_nodes() * torch.ones([ g.number_of_nodes() ], dtype=torch.int)\n g.ndata['_ID'] = g.nodes()\n g.ndata['cell_xyz'] = cell_xyz\n g.ndata['neu_energy'] = neu_energies[:, None]\n \n g_hr = graph(cell_xyz_val_hr)\n g_hr = dgl.transform.remove_self_loop(g_hr)\n \n g_hr.ndata['energy'] = torch.FloatTensor(energies_highres) \n frac_hr = torch.FloatTensor(neu_energies_highres)/torch.FloatTensor(energies_highres)\n frac_hr[ torch.isnan(frac_hr) ] = 0. \n frac_hr[ torch.where(frac_hr < 0.) ] = 0.\n g_hr.ndata['neu_frac'] = frac_hr[:, None]\n g_hr.ndata['neu_energy'] = neu_energies_highres[:, None]\n #g_hr.ndata['cell_xyz_highres'] = cell_xyz_highres\n \n g_out_hr = graph(cell_xyz_val_hr)\n g_out_hr = dgl.transform.remove_self_loop(g_out_hr)\n g_out_hr.ndata['cell_xyz_highres'] = cell_xyz_highres\n \n pi0_phi = self.file['Pi0_Phi'][idx:idx+1]\n pi0_theta = self.file['Pi0_Theta'][idx:idx+1]\n \n sample = {\n 'gr' : g,\n 'gr_hr' : g_hr,\n 'gr_out_hr' : g_out_hr,\n 'pi0_theta' : torch.FloatTensor( np.cos(pi0_theta) ),\n 'pi0_phi' : torch.FloatTensor( np.cos(pi0_phi) )\n# 'cell_xyz' : cell_xyz,\n# 'cell_xyz_highres' : cell_xyz_highres\n \n \n }\n \n return sample\n\n\n# --------------------- create the batch function ---------------- #\ndef create_batch(batch):\n \n \n graph = [ sample['gr'] for sample in batch ]\n graph_hr = [ sample['gr_hr'] for sample in batch ]\n graph_out_hr = [ sample['gr_out_hr'] for sample in batch ]\n \n pi0_theta = torch.tensor([ sample['pi0_theta'] for sample in batch ])\n pi0_phi = torch.tensor([ sample['pi0_phi'] for sample in batch ])\n \n \n return dgl.batch(graph), dgl.batch(graph_hr), dgl.batch(graph_out_hr), pi0_theta, pi0_phi\n\n\n# ================== STARTING DIFFERENT PARTS OF UNET MODEL ============================== #\n\n# ------------- make the down conv layer ---------- #\nclass TopKPooling(nn.Module):\n def __init__(self, frac, in_feat=1, out_feat=1):\n super(TopKPooling, self).__init__()\n\n self.p = nn.Parameter(torch.Tensor(in_feat, out_feat))\n self.reset_parameters()\n \n self.frac = frac\n \n self.in_feat = in_feat\n self.out_feat = out_feat\n \n def reset_parameters(self):\n \"\"\"Reinitialize learnable parameters.\"\"\"\n if self.p is not None:\n init.xavier_uniform_(self.p)\n\n\n def forward(self, gr):\n \n output_gr = []\n \n with gr.local_scope():\n \n graph_list = dgl.unbatch(gr)\n \n batch_itr = 0\n \n for g in graph_list : \n \n batch_itr += 1\n k_val = int(g.number_of_nodes() * self.frac)\n \n X = g.ndata['energy']\n \n #print('X shape : ', X.shape)\n \n # ----- y = X . 
p / ||p||\n if(self.p.shape[1] == 1) : \n y = (X * self.p)/torch.sqrt( torch.sum(self.p ** 2) ).item()\n else : \n y = torch.mm(X, self.p)/torch.sqrt( torch.sum(self.p ** 2) ).item()\n \n #print('y shape : ', y.shape)\n g.ndata['y'] = y #torch.transpose(y, 1, 0) \n \n \n # ------ idx = rank(y, k) ----------- #\n pooled_node_features, selected_nodes = dgl.topk_nodes(g, 'y', k=k_val, descending=True, idx=0)\n \n # --- reduced representation ----- #\n sg = g.subgraph( selected_nodes[0].tolist() )\n sg.copy_from_parent()\n \n X_bar = sg.ndata['energy']\n \n y_bar = nn.Sigmoid()(pooled_node_features)\n \n \n X_bar = torch.reshape( X_bar, ( X_bar.shape[0], self.out_feat) )\n# print('X_bar shape : ', X_bar.shape)\n# print('y_bar shape : ', y_bar.shape)\n \n \n mod_en = X_bar * y_bar[0]\n \n #print('Mod en shape : ', mod_en.shape)\n \n sg.ndata['energy'] = mod_en\n\n sg.ndata['parent_node'] = sg.parent.number_of_nodes() * torch.ones([ sg.number_of_nodes() ], dtype=torch.int)\n sg.ndata['own_node'] = sg.number_of_nodes() * torch.ones([ sg.number_of_nodes() ], dtype=torch.int64)\n sg.ndata['selected_node'] = selected_nodes[0]\n \n \n output_gr.append( sg )\n \n #print('End batch itr : ', batch_itr)\n \n \n #print(output_gr)\n return dgl.batch(output_gr)\n\n# ------------------------------ define the UpPool block -------------------------- #\nclass UpPool(nn.Module):\n def __init__(self, feat_dim):\n super(UpPool, self).__init__()\n\n self.dim = feat_dim\n \n def forward(self, bg, bg_u):\n\n output_gr = []\n \n with bg.local_scope():\n with bg_u.local_scope():\n \n graph_list = dgl.unbatch(bg)\n graph_list_u = dgl.unbatch(bg_u)\n\n for ig in range(len(graph_list)) :\n\n g = graph_list[ig]\n g_u = graph_list_u[ig]\n \n #print('----- start filling -------')\n n_unpooled_node = g.ndata['parent_node'][0]\n\n selected_nodes = g.ndata['_ID'][:, None] #a\n pooled_node_features = g.ndata['energy'] #b\n \n# print('selected_nodes shape : ', selected_nodes.shape)\n# print('pooled_node_features : ', pooled_node_features.shape)\n\n expanded_node = selected_nodes.expand_as(pooled_node_features) #c\n expanded_node = expanded_node.to(dev)\n\n pooled_node_features = pooled_node_features.to(dev)\n\n x = torch.zeros(n_unpooled_node, self.dim, device=dev)\n #x.to(dev)\n \n x.scatter_(0, expanded_node, pooled_node_features )\n \n #print('----- end filling -------')\n \n g_new = dgl.DGLGraph()\n g_new.add_nodes(n_unpooled_node)\n \n src, dst = g_u.edges()\n g_new.add_edges(src, dst)\n \n g_new.ndata['energy'] = x\n g_new.ndata['parent_node'] = g_u.ndata['parent_node'][0] * torch.ones([ g_new.number_of_nodes() ], dtype=torch.int)\n g_new.ndata['_ID'] = torch.tensor(g_u.ndata['_ID'], dtype=torch.int64)\n \n# print('Output node energy shape : ', g_new.ndata['energy'].shape)\n \n output_gr.append(g_new) \n \n \n return dgl.batch(output_gr)\n\n\n# ------------------------- define the broadcasting ---------------- #\n\nclass Broadcasting(nn.Module):\n def __init__(self):\n super(Broadcasting, self).__init__()\n \n def forward(self, bg, bg_out_hr,feature_name='energy',out_name='neu_energy'):\n \n output_gr = [] \n \n with bg.local_scope():\n with bg_out_hr.local_scope():\n \n graph_list = dgl.unbatch(bg)\n graph_list_out_hr = dgl.unbatch(bg_out_hr)\n \n for ig in range(len(graph_list)) :\n \n g = graph_list[ig]\n g_out_hr = graph_list_out_hr[ig]\n \n data = g.ndata[feature_name] \n data = torch.reshape(data, (data.shape[0],) )\n b_factors = g.ndata['broadcast']\n\n out = torch.repeat_interleave(data,b_factors,dim=0)\n\n 
g_out_hr.ndata[out_name] = out[:, None]\n \n output_gr.append(g_out_hr )\n \n return dgl.batch(output_gr)\n\n\n# -------------------------------- define the GraphUNet model -------------------------------- #\nclass GraphUNet(nn.Module):\n def __init__(self):\n super(GraphUNet, self).__init__()\n\n scale = 2\n\n self.do_conv1 = GraphConv(in_feats = 1, out_feats = 1)\n self.do_pool1 = TopKPooling(frac=0.75, in_feat=1, out_feat=1) \n \n self.do_conv2 = GraphConv(in_feats = 1, out_feats = 5*scale)\n self.do_pool2 = TopKPooling(frac=0.75, in_feat=5*scale, out_feat=5*scale)\n \n self.do_conv3 = GraphConv(in_feats = 5*scale, out_feats = 7*scale)\n self.do_pool3 = TopKPooling(frac=0.75, in_feat=7*scale, out_feat=7*scale)\n \n self.do_conv4 = GraphConv(in_feats = 7*scale, out_feats = 9*scale)\n self.do_pool4 = TopKPooling(frac=0.75, in_feat=9*scale, out_feat=9*scale)\n \n self.bn_conv = GraphConv(in_feats = 9*scale, out_feats = 9*scale)\n \n self.up_pool1 = UpPool(feat_dim = 9*scale)\n self.up_conv1 = GraphConv(in_feats = 9*scale, out_feats = 7*scale)\n \n self.up_pool2 = UpPool(feat_dim = 7*scale)\n self.up_conv2 = GraphConv(in_feats = 7*scale, out_feats = 5*scale)\n \n self.up_pool3 = UpPool(feat_dim = 5*scale)\n self.up_conv3 = GraphConv(in_feats = 5*scale, out_feats = 1)\n \n self.up_pool4 = UpPool(feat_dim = 1)\n self.up_conv4 = GraphConv(in_feats = 1, out_feats = 1)\n \n self.broadcast = Broadcasting()\n \n self.b_conv = GraphConv(in_feats = 1, out_feats = 1)\n \n self.proj = nn.Sequential(\n nn.Linear(1, 20),\n nn.LeakyReLU( -0.8 ), \n nn.Tanh(),\n\n nn.Linear(20, 40), \n nn.LeakyReLU( -0.8 ), \n nn.Tanh(),\n\n nn.Linear(40, 30), \n nn.LeakyReLU( -0.8 ),\n nn.Tanh(),\n\n nn.Linear(30, 10), \n nn.LeakyReLU( -0.8 ),\n nn.Tanh(),\n\n nn.Linear(10, 5), \n nn.LeakyReLU( -0.8 ),\n nn.Tanh(),\n\n nn.Linear(5, 3)\n )\n \n def forward(self, bg, bg_out_hr):\n\n #print(bg.batch_num_nodes)\n #print('bg en shape : ', bg.ndata['energy'].shape )\n \n # ---- 1st down block ---- #\n h = bg.ndata['energy']\n h = self.do_conv1(bg, h)\n bg.ndata['energy'] = h\n bg_d1 = self.do_pool1(bg)\n #print('bg D1 en shape : ', bg_d1.ndata['energy'].shape )\n \n # ---- 2nd down block ---- #\n h = bg_d1.ndata['energy']\n h = self.do_conv2(bg_d1, h)\n bg_d1.ndata['energy'] = h\n bg_d2 = self.do_pool2(bg_d1)\n #print('bg D2 en shape : ', bg_d2.ndata['energy'].shape )\n \n # ---- 3rd down block ---- #\n h = bg_d2.ndata['energy']\n h = self.do_conv3(bg_d2, h)\n bg_d2.ndata['energy'] = h\n bg_d3 = self.do_pool3(bg_d2)\n #print('bg D3 en shape : ', bg_d3.ndata['energy'].shape )\n \n # ---- 4th down block ---- #\n h = bg_d3.ndata['energy']\n h = self.do_conv4(bg_d3, h)\n bg_d3.ndata['energy'] = h\n bg_d4 = self.do_pool4(bg_d3)\n #print('bg D4 en shape : ', bg_d4.ndata['energy'].shape )\n \n # ------- bottle-neck block ----- #\n h = bg_d4.ndata['energy']\n h = self.bn_conv( bg_d4, h )\n bg_bn = bg_d4\n bg_bn.ndata['energy'] = h\n \n \n # --------- 1st up-conv block ------- #\n bg_u1 = self.up_pool1(bg_bn, bg_d3)\n bg_u1.ndata['energy'] = bg_u1.ndata['energy'] + bg_d3.ndata['energy'] # -- the 1st skip con -- #\n h = bg_u1.ndata['energy']\n h = self.up_conv1(bg_u1, h)\n bg_u1.ndata['energy'] = h\n #print('bg U1 en shape : ', bg_u1.ndata['energy'].shape )\n \n \n # --------- 2nd up-conv block ------- #\n bg_u2 = self.up_pool2(bg_u1, bg_d2)\n bg_u2.ndata['energy'] = bg_u2.ndata['energy'] + bg_d2.ndata['energy'] # -- the 2nd skip con -- #\n h = bg_u2.ndata['energy']\n h = self.up_conv2(bg_u2, h)\n bg_u2.ndata['energy'] = h\n #print('bg 
U2 en shape : ', bg_u2.ndata['energy'].shape )\n \n # --------- 3rd up-conv block ------- #\n bg_u3 = self.up_pool3(bg_u2, bg_d1)\n bg_u3.ndata['energy'] = bg_u3.ndata['energy'] + bg_d1.ndata['energy'] # -- the 3rd skip con -- #\n h = bg_u3.ndata['energy']\n h = self.up_conv3(bg_u3, h)\n bg_u3.ndata['energy'] = h\n #print('bg U3 en shape : ', bg_u3.ndata['energy'].shape )\n \n # --------- 4th up-conv block ------- #\n bg_u4 = self.up_pool4(bg_u3, bg)\n bg_u4.ndata['energy'] = bg_u4.ndata['energy'] + bg.ndata['energy'] # -- the 4th skip con -- #\n h = bg_u4.ndata['energy']\n h = self.up_conv4(bg_u4, h)\n bg_u4.ndata['energy'] = h\n #print('bg U4 en shape : ', bg_u4.ndata['energy'].shape )\n \n bg_u4.ndata['broadcast'] = bg.ndata['broadcast']\n \n bg_out_hr = self.broadcast( bg_u4, bg_out_hr )\n \n h = bg_out_hr.ndata['neu_energy']\n h = self.b_conv( bg_out_hr, h )\n \n #h = nn.Tanh()(h)\n h = nn.ReLU()(h)\n bg_out_hr.ndata['neu_energy'] = h\n \n hout = []\n \n with bg_out_hr.local_scope():\n graph_list_out_hr = dgl.unbatch(bg_out_hr) \n \n for ig in range(len(graph_list_out_hr)) :\n \n g_out_hr = graph_list_out_hr[ig]\n \n h = g_out_hr.ndata['neu_energy']\n \n ho = self.proj( torch.mean(h, dim=0) )\n\n hout.append(ho[None,:])\n \n hout = torch.cat( hout, dim=0 )\n \n return bg_out_hr, hout\n\n\n# ------------------- the loss function ---------- #\ndef LossFunction(bg_hr_pr, gr_hr_tar, vec, theta, phi) : \n \n pi0_M = 135.\n \n \n tar_ne_en = gr_hr_tar.ndata['neu_energy'] \n pred_ne_en = bg_hr_pr.ndata['neu_energy']\n\n tar_ne_en = tar_ne_en \n pred_ne_en = pred_ne_en\n\n \n wt_avg = torch.sum( torch.abs( pred_ne_en.to(dev) - tar_ne_en.to(dev) ) )\n \n wt_avg = wt_avg / torch.sum( tar_ne_en.to(dev) )\n \n theta_pr = vec[:,0]\n phi_pr = vec[:,1]\n mass_fr = vec[:,2]\n \n del_th = torch.sum( ( (theta_pr - theta)/theta ) ** 2 )\n del_phi = torch.sum( ( (phi_pr - phi)/phi ) ** 2 )\n del_m = torch.sum( (mass_fr - 1) ** 2 ) * pi0_M\n \n total_loss = wt_avg + del_th + del_phi + del_m\n \n return total_loss\n\n\n# ----------------------- the data loader ----------------- #\n\ntrain_data = SuperResDataset('training_set.h5', ndata=40000)\nvalid_data = SuperResDataset('validation_set.h5', ndata=8000)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=10, shuffle=True,collate_fn=create_batch)\nvalid_loader = torch.utils.data.DataLoader(valid_data, batch_size=10, shuffle=True,collate_fn=create_batch)\n\n\n# ---------------- declare the model & optimizer ----------------- #\nmodel = GraphUNet()\nmodel.to(dev)\n\nopt = optim.Adam(model.parameters(), lr=1e-4)\n\n# ---------------- Make the training loop ----------------- #\n\ntrain_loss_v, valid_loss_v = [], []\n\n\n# number of epochs to train the model\nn_epochs = 100\n\nvalid_loss_min = np.Inf # track change in validation loss\n\nif( len(valid_loss_v) > 0 ) : \n valid_loss_min = np.min( np.array(valid_loss_v) )\n\n\nfor epoch in range(1, n_epochs+1):\n\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n ###################\n # train the model #\n ###################\n #scheduler.step()\n model.train() ## --- set the model to train mode -- ##\n\n with tqdm.tqdm(train_loader, ascii=True) as tq:\n for gr ,gr_hr, gr_out_hr, pi0_theta, pi0_phi in tq:\n \n gr ,gr_hr, gr_out_hr, pi0_theta, pi0_phi = gr.to(dev) ,gr_hr.to(dev), gr_out_hr.to(dev), pi0_theta.to(dev), pi0_phi.to(dev)\n\n opt.zero_grad()\n\n bg, hout = model( gr, gr_out_hr )\n\n loss = LossFunction( bg, gr_hr, hout, pi0_theta, pi0_phi )\n\n 
loss.backward()\n # perform a single optimization step (parameter update)\n opt.step()\n\n # update training loss\n train_loss += loss.item() * pi0_theta.shape[0]\n\n del gr; del gr_hr; del gr_out_hr; del pi0_theta; del pi0_phi\n torch.cuda.empty_cache()\n\n\n ###################### \n # validate the model #\n ######################\n model.eval() ## --- set the model to validation mode -- ##\n\n with tqdm.tqdm(valid_loader, ascii=True) as tq:\n\n for gr ,gr_hr, gr_out_hr, pi0_theta, pi0_phi in tq:\n \n gr ,gr_hr, gr_out_hr, pi0_theta, pi0_phi = gr.to(dev) ,gr_hr.to(dev), gr_out_hr.to(dev), pi0_theta.to(dev), pi0_phi.to(dev)\n\n bg, hout = model( gr, gr_out_hr )\n\n loss = LossFunction( bg, gr_hr, hout, pi0_theta, pi0_phi )\n\n # update training loss\n valid_loss += loss.item() * pi0_theta.shape[0]\n\n del gr; del gr_hr; del gr_out_hr; del pi0_theta; del pi0_phi\n torch.cuda.empty_cache()\n\n\n\n # calculate average losses\n train_loss = train_loss/len(train_loader.dataset)\n valid_loss = valid_loss/len(valid_loader.dataset)\n \n # print training/validation statistics \n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\n epoch, train_loss, valid_loss))\n \n train_loss_v.append(train_loss) \n valid_loss_v.append(valid_loss)\n \n # save model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(\n valid_loss_min,\n valid_loss))\n torch.save(model.state_dict(), 'model_GraphUNet.pt')\n valid_loss_min = valid_loss\n","sub_path":"TRAINING_SCRIPTS/SuperRes_Training.py","file_name":"SuperRes_Training.py","file_ext":"py","file_size_in_byte":23753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191756792","text":"# imports\nimport bluetooth\nimport time\nimport json\nimport datetime\nimport os\n\n\n# global variables\ncurrentAbsolutePath = str( os.path.dirname(__file__) );\nif currentAbsolutePath:\n currentAbsolutePath += '/'\narduinoCommScriptPath = os.path.join( currentAbsolutePath, 'arduino-communication.py' )\njsonPathToAuthedDevices = os.path.join( currentAbsolutePath, '../../device-data/authorized-devices.json' )\njsonPathToAvailableDevices = os.path.join( currentAbsolutePath, '../../device-data/available-devices.json' )\n\n# check devices for availabillity\nwhile True:\n deviceStatusData = []\n deviceFound = False\n\n # load json data of authorized devices / users\n with open( jsonPathToAuthedDevices ) as jsonFile:\n deviceData = json.load( jsonFile )\n\n # ble sniffing\n for item in deviceData:\n result = bluetooth.lookup_name( item[ 'address' ], timeout = 5 )\n\n if ( result != None ):\n item[ 'status' ] = '1'\n # print( item[ 'name' ] + ' is available' )\n\n deviceFound = True\n\n # get time of last status update\n item[ 'time' ] = str( datetime.datetime.now() )\n else:\n item[ 'status' ] = '0'\n # print( item[ 'name' ] + ' is not available' )\n\n # add to export json file object\n deviceStatusData.append( item )\n\n # if a device is available open or close the door\n if deviceFound:\n os.system( 'sudo python ' + arduinoCommScriptPath + ' ON' )\n else:\n os.system( 'sudo python ' + arduinoCommScriptPath + ' OFF' )\n\n # write / update available data file\n with open( jsonPathToAvailableDevices, 'w+' ) as writeFile:\n json.dump( deviceStatusData, writeFile, indent = 4, sort_keys = True )\n\n # wait some time\n time.sleep( 2 )\n","sub_path":"Raspberry 
Scripts/scripts/python-scripts/available-devices.py","file_name":"available-devices.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"530662448","text":"# encoding: utf-8\n# -*- coding: utf-8 -*-\n\nimport sys\nimport urllib\nimport image_crop\nimport bilibili_channel\nfrom pyquery import PyQuery as pq\nfrom lxml.html import HTMLParser, fromstring\nfrom workflow import Workflow, web\n\n\ndef main(workflow):\n query = workflow.args[0]\n query = query.encode('utf-8')\n if bilibili_channel.in_channel(query):\n channel_search(query)\n else:\n keyword_search(query)\n\n\ndef keyword_search(query):\n url = 'http://search.bilibili.com/all?' + urllib.urlencode({\"keyword\": query})\n r = web.get(url)\n\n # throw an error if request failed\n # Workflow will catch this and show it to the user\n r.raise_for_status()\n\n # init the image directory\n image_crop.init_dir()\n\n # analyse dom\n # dom = pq(r.content)\n dom = pq(fromstring(r.content, parser=HTMLParser(encoding='utf-8')))\n\n # 读取剧集列表\n album_list = dom.find('li.synthetical')\n for album in album_list:\n handle_album(album)\n\n # 读取单视频列表\n video_list = dom.find('li.video')\n for video in video_list:\n handle_video(video)\n\n wf.send_feedback()\n\n\ndef channel_search(query):\n url = bilibili_channel.get_channel_url(query)\n r = web.get(url)\n\n # throw an error if request failed\n # Workflow will catch this and show it to the user\n r.raise_for_status()\n\n # init the image directory\n image_crop.init_dir()\n\n # analyse dom\n # dom = pq(r.content)\n dom = pq(fromstring(r.content, parser=HTMLParser(encoding='utf-8')))\n\n # 读取视频列表\n video_list = dom.find('div.l-item')\n for video in video_list:\n handle_channel_video(video)\n\n wf.send_feedback()\n\n\ndef handle_album(album):\n # title\n title = pq(album).find('a.title').attr('title')\n # description\n desp = pq(album).find('div.des').text()\n # arg(link)\n link = pq(album).find('a:first').attr('href')\n # img\n img = pq(album).find('a:first').find('img').attr('src')\n\n wf.add_item(title=title,\n subtitle=desp,\n arg=link.decode('utf-8'),\n icon=image_crop.load_and_save_img(img),\n valid=True)\n\n\ndef handle_video(video):\n video = pq(video)\n # title\n title = video.find('div.headline').find('a.title').attr('title')\n # time\n time = video.find('span:first').text().strip().encode('utf-8')\n # subtitle\n _tags = video.find('div.tags')\n author = _tags.find('span').eq(3).text().strip().encode('utf-8')\n view = _tags.find('span').eq(0).text().strip().encode('utf-8')\n bullet = _tags.find('span').eq(1).text().strip().encode('utf-8')\n date = _tags.find('span').eq(2).text().strip().encode('utf-8')\n subtitle = '时间:' + time +\\\n ' UP主:' + author +\\\n ' 观看:' + view +\\\n ' 弹幕:' + bullet +\\\n ' 上传时间:' + date\n\n # arg(link)\n link = video.children('a:first').attr('href')\n # img\n img = video.find('a:first img').attr('src')\n\n wf.add_item(title=title,\n subtitle=subtitle.decode('utf-8'),\n arg=link.decode('utf-8'),\n icon=image_crop.load_and_save_img(img),\n valid=True)\n\n\ndef handle_channel_video(video):\n video = pq(video)\n # title\n title = video.find('div.l-r').find('a.title').attr('title')\n # subtitle\n author = video.find('div.up-info').find('a.v-author').attr('title').encode('utf-8')\n view = video.find('div.v-info').find('span.gk').children('span').attr('number').encode('utf-8')\n bullet = video.find('div.v-info').find('span.dm').children('span').attr('number').encode('utf-8')\n date 
= video.find('div.up-info').find('span.v-date').text().strip().encode('utf-8')\n subtitle = ' UP主:' + author + \\\n ' 观看:' + view + \\\n ' 弹幕:' + bullet + \\\n ' 上传时间:' + date\n\n # arg(link)\n link = video.children('a:first').attr('href')\n # img\n img = video.find('a:first img').attr('data-img')\n\n wf.add_item(title=title,\n subtitle=subtitle.decode('utf-8'),\n arg=link.decode('utf-8'),\n icon=image_crop.load_and_save_img(img),\n valid=True)\n\nif __name__ == '__main__':\n wf = Workflow()\n sys.exit(wf.run(main))\n","sub_path":"bilibili.py","file_name":"bilibili.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"554398358","text":"from selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\n\n# chrome driver option參數\n# 可參考https://www.itread01.com/content/1544787185.html\noptions = webdriver.ChromeOptions()\n# 不加載圖片\noptions.add_argument('blink-settings=imagesEnabled=false')\n\n# chrome driver放在專案目錄下,如果不是放在專案目錄下,則需另外指定路徑\n# driver = webdriver.Chrome(executable_path=\"chrome driver路徑\")\ndriver = webdriver.Chrome(options=options)\ndriver.get(\"該用戶的喜歡url\")\n\n# 使用JS下拉網頁,每拉一次等兩秒\nSCROLL_PAUSE_TIME = 2\n\nlast_height = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\nwhile True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(SCROLL_PAUSE_TIME)\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n# 找出所有連結\n# Beautiful Soup 解析 HTML 程式碼\nhtml = driver.page_source\nsoup = BeautifulSoup(html, \"lxml\")\n# 使用css selecter取得a tag\nall_links = soup.select('div.isayt>a')\n\n# 將連結寫入txt檔\nf = open('./all_links2.txt','w')\nfor link in all_links:\n f.write(link.get('href'))\n f.write('\\n')\nf.close()\n\ndriver.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"643270722","text":"from .lowresdensenet import lowres_densenet121, lowres_densenet161, lowres_densenet169\nfrom .lowresgooglenet import lowres_googlenet\nfrom .lowresinception import lowres_inception_v3\nfrom .lowresmobilenetv2 import lowres_mobilenet_v2\nfrom .lowresresnet import lowres_resnet14, lowres_resnet18, lowres_resnet18_noresidual, lowres_resnet34, \\\n lowres_resnet50, lowres_resnet101, lowres_resnet152\nfrom .lowresvgg import lowres_vgg11_bn, lowres_vgg13_bn, lowres_vgg16_bn, lowres_vgg19_bn, lowres_vgg11, lowres_vgg13, \\\n lowres_vgg16, lowres_vgg19\nfrom .lowresresnet9 import lowres_resnet9\nfrom .lowresalexnet import lowres_alexnet\n\nall_classifiers = {\n \"lowres_vgg11_bn\": lowres_vgg11_bn,\n \"lowres_vgg13_bn\": lowres_vgg13_bn,\n \"lowres_vgg16_bn\": lowres_vgg16_bn,\n \"lowres_vgg19_bn\": lowres_vgg19_bn,\n \"lowres_vgg11\": lowres_vgg11_bn,\n \"lowres_vgg13\": lowres_vgg13_bn,\n \"lowres_vgg16\": lowres_vgg16_bn,\n \"lowres_vgg19\": lowres_vgg19_bn,\n \"lowres_resnet14\": lowres_resnet14,\n \"lowres_resnet18\": lowres_resnet18,\n \"lowres_resnet18_noresidual\": lowres_resnet18_noresidual,\n \"lowres_resnet34\": lowres_resnet34,\n \"lowres_resnet50\": lowres_resnet50,\n \"lowres_resnet101\": lowres_resnet101,\n \"lowres_resnet152\": lowres_resnet152,\n \"lowres_resnet9\": lowres_resnet9,\n \"lowres_densenet121\": lowres_densenet121,\n \"lowres_densenet161\": lowres_densenet161,\n \"lowres_densenet169\": 
lowres_densenet169,\n \"lowres_mobilenet_v2\": lowres_mobilenet_v2,\n \"lowres_googlenet\": lowres_googlenet,\n \"lowres_inception_v3\": lowres_inception_v3,\n \"lowres_alexnet\": lowres_alexnet\n}\n\n\ndef get_model(name):\n return all_classifiers.get(name)\n","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"534426820","text":"import pytest\nimport tensorflow as tf\nimport numpy as np\n\nfrom tfs.dataset.skdata import *\nfrom tfs.dataset.predefined import *\nimport tfs.dataset.data_tool as dtool\nimport shutil\n\n@pytest.fixture\ndef data():\n return MakeBlobs(test_percent=0.3,n_samples=100)\n\nclass TestDataTool:\n def test_split(self,capsys):\n d=np.arange(10)\n ds=dtool.split_n(d,5)\n assert len(ds)==5\n for dd in ds:\n assert len(dd)==2\n\nclass TestDataset:\n def test_empty(self):\n d=Dataset()\n\n def test_dataset(self,data):\n assert data.train.shape[0]==70\n\n def test_cv(self,data):\n i=0\n for train,test in data.train.cross_validation_loop(7):\n i=i+1\n assert train.shape[0]==60\n assert test.shape[0]==10\n assert i==7\n\n def test_batch(self,data):\n first_data=data.train.data[0]\n for i in range(8):\n x,y = data.train.next_batch(10,False)\n assert x.shape[0]==10 and y.shape[0]==10\n assert data.train.epochs_completed==1\n assert (x[0] == first_data).all()\n\n def test_one_hot(self,data):\n lbls = data.train.labels.copy()\n data.to_one_hot()\n assert data.train.labels.ndim==2\n data.to_raw_label()\n assert data.train.labels.ndim==1\n np.testing.assert_array_equal(lbls,data.train.labels)\n\n\n def test_cifar10(self,capsys):\n with capsys.disabled():\n data = Cifar10()\n\n def test_mnist(self,capsys):\n with capsys.disabled():\n data = Mnist()\n\n\n\n","sub_path":"tests/dataset/dataset_test.py","file_name":"dataset_test.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"537903332","text":"import sys\nimport os\n\nfrom lxml import etree as ET\n\nfrom ._xml_parser_oval_scan_definitions import _XmlParserScanDefinitions\nfrom ._builder_oval_graph import _BuilderOvalGraph\nfrom .exceptions import NotChecked\n\nns = {\n 'XMLSchema': 'http://oval.mitre.org/XMLSchema/oval-results-5',\n 'xccdf': 'http://checklists.nist.gov/xccdf/1.2',\n 'arf': 'http://scap.nist.gov/schema/asset-reporting-format/1.1',\n 'oval-definitions': 'http://oval.mitre.org/XMLSchema/oval-definitions-5',\n 'scap': 'http://scap.nist.gov/schema/scap/source/1.2',\n 'oval-characteristics': 'http://oval.mitre.org/XMLSchema/oval-system-characteristics-5',\n}\n\n\nclass XmlParser:\n def __init__(self, src):\n self.src = src\n self.tree = ET.parse(self.src)\n self.root = self.tree.getroot()\n if not self.validate(\n 'schemas/arf/1.1/asset-reporting-format_1.1.0.xsd'):\n CRED = '\\033[91m'\n CEND = '\\033[0m'\n print(\n CRED +\n \"Warning: This file is not valid arf report.\" +\n CEND,\n file=sys.stderr)\n try:\n self.used_rules = self._get_used_rules()\n self.report_data = self._get_report_data(\n list(self.used_rules.values())[0]['href'])\n self.notselected_rules = self._get_notselected_rules()\n self.definitions = self._get_definitions()\n self.oval_definitions = self._get_oval_definitions()\n self.scan_definitions = _XmlParserScanDefinitions(\n self.definitions, self.oval_definitions, self.report_data).get_scan()\n except BaseException:\n raise ValueError(\n 'This file 
\"{}\" is not arf report file or there are no results'.format(\n self.src))\n\n def get_src(self, src):\n _dir = os.path.dirname(os.path.realpath(__file__))\n FIXTURE_DIR = os.path.join(_dir, src)\n return str(FIXTURE_DIR)\n\n def validate(self, xsd_path):\n xsd_path = self.get_src(xsd_path)\n xmlschema_doc = ET.parse(xsd_path)\n xmlschema = ET.XMLSchema(xmlschema_doc)\n\n xml_doc = self.tree\n result = xmlschema.validate(xml_doc)\n\n return result\n\n def _get_used_rules(self):\n rulesResults = self.root.findall(\n './/xccdf:TestResult/xccdf:rule-result', ns)\n rules = {}\n for ruleResult in rulesResults:\n result = ruleResult.find('.//xccdf:result', ns)\n if result.text != \"notselected\":\n check_content_ref = ruleResult.find(\n './/xccdf:check/xccdf:check-content-ref', ns)\n message = ruleResult.find(\n './/xccdf:message', ns)\n rule_dict = {}\n if check_content_ref is not None:\n rule_dict['id_def'] = check_content_ref.attrib.get('name')\n rule_dict['href'] = check_content_ref.attrib.get('href')\n rule_dict['result'] = result.text\n if message is not None:\n rule_dict['message'] = message.text\n rules[ruleResult.get('idref')] = rule_dict\n return rules\n\n def _get_report_data(self, href):\n report_data = None\n reports = self.root.find('.//arf:reports', ns)\n for report in reports:\n if \"#\" + str(report.get(\"id\")) == href:\n report_data = report\n return report_data\n\n def _get_notselected_rules(self):\n rulesResults = self.root.findall(\n './/xccdf:TestResult/xccdf:rule-result', ns)\n rules = []\n for ruleResult in rulesResults:\n result = ruleResult.find('.//xccdf:result', ns)\n if result.text == \"notselected\":\n rules.append(ruleResult.get('idref'))\n return rules\n\n def _get_definitions(self):\n data = self.report_data.find(\n ('.//XMLSchema:oval_results/XMLSchema:results/'\n 'XMLSchema:system/XMLSchema:definitions'), ns)\n return data\n\n def _get_oval_definitions(self):\n return self.root.find(\n './/arf:report-requests/arf:report-request/'\n 'arf:content/scap:data-stream-collection/'\n 'scap:component/oval-definitions:oval_definitions/'\n 'oval-definitions:definitions', ns)\n\n def _get_definition_of_rule(self, rule_id):\n if rule_id in self.used_rules:\n rule_info = self.used_rules[rule_id]\n if rule_info['id_def'] is None:\n raise NotChecked(\n '\"{}\" is {}: {}'.format(\n rule_id,\n rule_info['result'],\n rule_info['message']))\n return dict(rule_id=rule_id,\n definition_id=rule_info['id_def'],\n definition=self.scan_definitions[rule_info['id_def']])\n elif rule_id in self.notselected_rules:\n raise ValueError(\n 'Rule \"{}\" was not selected, so there are no results.'\n .format(rule_id))\n else:\n raise ValueError('404 rule \"{}\" not found!'.format(rule_id))\n\n def get_oval_tree(self, rule_id):\n return _BuilderOvalGraph.get_oval_graph_from_dict_of_rule(\n self._get_definition_of_rule(rule_id))\n","sub_path":"oval_graph/xml_parser.py","file_name":"xml_parser.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"377243217","text":"from visual import *\n\ndef pause():\n while True:\n rate(50)\n if scene.mouse.events:\n m = scene.mouse.getevent()\n if m.click == 'left': return\n elif scene.kb.keys:\n k = scene.kb.getkey()\n return\n\nscene.range=20\nscene.width=600\nscene.height=450\n\nboxcolor=color.yellow\nballcolor=color.white\npaddlecolor=color.green\n\n#create graphical objects\nceiling=box(pos=(0,12,0), length=50,width=1,height=2, 
color=boxcolor)\nfloor=box(pos=(0,-12,0), length=50,width=1,height=2, color=boxcolor)\nball=sphere(pos=(0,0,0), radius=0.5)\n\npaddle2=box(pos=(18,0,0), length=0.5, width=1, height=5, color=paddlecolor)\n\nleftwall1=box(pos=(-18,-7,0), length=0.5, width=1, height=10, color=boxcolor)\nleftwall2=box(pos=(-18,7,0), length=0.5, width=1, height=10, color=boxcolor)\n\n#set the initial velocity of the ball\ninitialVelocity=3*vector(-5,-4,0)\nball.velocity=initialVelocity\n\ndt=0.01\n\n#scores\nplayer1score=0\nplayer2score=0\n\npause()\n\nwhile 1:\n while 1:\n rate(100)\n\n #move the ball\n ball.pos=ball.pos+ball.velocity*dt\n\n #check for collisions\n if(ball.pos.y+ball.radius > ceiling.pos.y-ceiling.height/2):\n ball.pos=ball.pos-ball.velocity*dt\n ball.velocity.y=-ball.velocity.y\n if(ball.pos.y-ball.radius < floor.pos.y+ceiling.height/2):\n ball.pos=ball.pos-ball.velocity*dt\n ball.velocity.y=-ball.velocity.y\n\n if((ball.pos.x+ball.radius)>paddle2.pos.x-paddle2.length/2):\n if(ball.pos.ypaddle2.pos.y-paddle2.height/2):\n ball.pos=ball.pos-ball.velocity*dt\n ball.velocity.x=-ball.velocity.x\n\n if((ball.pos.x-ball.radius)leftwall1.pos.y-leftwall1.height/2):\n ball.pos=ball.pos-ball.velocity*dt\n ball.velocity.x=-ball.velocity.x\n elif(ball.pos.yleftwall2.pos.y-leftwall2.height/2):\n ball.pos=ball.pos-ball.velocity*dt\n ball.velocity.x=-ball.velocity.x\n \n #check to see if ball is at edge of window\n if(ball.pos.x>18):\n player1score = player1score + 1\n print(\"\\nscore: player1=\", player1score, \", player2=\", player2score)\n break\n \n if(ball.pos.x<-18):\n player2score = player2score + 1\n print(\"\\nscore: player1=\", player1score, \", player2=\", player2score)\n break\n\n #get mouse position and move paddle2\n mouse=scene.mouse.pos\n if(mouse.y-paddle2.height/2>floor.y+floor.height/2 and mouse.y+paddle2.height/2ceiling.y-floor.height/2):\n# paddle2.pos.y=ceiling.y-ceiling.height/2-paddle2.height/2\n paddle2.pos=(18,0,0)\n\n\n ball.pos=vector(0,0,0)\n ball.velocity=-ball.velocity\n paddle2.pos=vector(18,0,0)\n pause()\n","sub_path":"lab-manual/game-pong/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"591232118","text":"from django_filters import rest_framework as filters\n\nfrom core.views import BaseAssetAttrViewSet\nfrom core.models import Receive, Supplier, PurchaseOrder\nfrom supplier import serializers\n\n\nclass SupplierFilter(filters.FilterSet):\n class Meta:\n model = Supplier\n fields = {\n \"last_seen\": [\"lt\", \"gt\", \"lte\", \"gte\", \"exact\"],\n }\n\n\nclass SupplierViewSet(BaseAssetAttrViewSet):\n \"\"\"Manage Supplier in the database\"\"\"\n\n queryset = Supplier.objects.all()\n serializer_class = serializers.SupplierSerializer\n filterset_class = SupplierFilter\n search_fields = [\n \"attention\",\n \"name\",\n \"address\",\n \"city\",\n \"state\",\n \"zipcode\",\n \"contact\",\n \"term\",\n \"phone_no\",\n \"email\",\n \"payables\",\n ]\n\n\nclass ReceiveViewSet(BaseAssetAttrViewSet):\n \"\"\"Manage Receive in the database\"\"\"\n\n queryset = Receive.objects.all()\n serializer_class = serializers.ReceiveSerializer\n search_fields = [\n \"id\",\n \"date\",\n \"description\",\n \"payment_date\",\n \"payment_method\",\n \"payment_note\",\n \"grand_total\",\n \"status\",\n \"supplier__name\",\n \"purchase_order__id\",\n ]\n\n\nclass PurchaseOrderViewSet(BaseAssetAttrViewSet):\n \"\"\"Manage Supplier in the database\"\"\"\n\n queryset = 
PurchaseOrder.objects.all()\n serializer_class = serializers.PurchaseOrderSerializer\n search_fields = [\n \"id\",\n \"date\",\n \"description\",\n \"payment_date\",\n \"payment_method\",\n \"payment_note\",\n \"grand_total\",\n \"status\",\n \"supplier__name\",\n ]\n","sub_path":"supplier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191290414","text":"# Реализовать функцию, принимающую несколько параметров,\n# описывающих данные пользователя: имя, фамилия, год рождения, город проживания, email, телефон.\n# Функция должна принимать параметры как именованные аргументы. Реализовать вывод данных о пользователе одной строкой.\n\n# запращиваем данные пользователя\ninput_name = input('Введите имя пользователя: ')\ninput_surname = input('Введите фамилию пользователя: ')\ninput_year_of_birth = input('Введите дату рождения пользователя: ')\ninput_email = input('Введите электронну почту пользователя: ')\ninput_phone = input('Введите номер телефона пользователя: ')\ninput_city = input('Введите город проживания пользователя: ')\n\n\ndef users_data(name, surname, year_of_birth, city, email, phone):\n \"\"\"\n Функция выводит данные пользователя в одну строчку\n :param name: принимает имя пользователя\n :param surname: принимает фамилию пользователя\n :param year_of_birth: принимает дату рождения\n :param city: принимает город проживания\n :param email: принимает электронн��ю почту пользователя\n :param phone: принимает номер телефона\n :return: ничего не возвращает\n \"\"\"\n print(name, surname, year_of_birth, city, email, phone)\n\n\nusers_data(input_name, input_surname, input_year_of_birth, input_city, input_email, input_phone)\n","sub_path":"HW_3/HW_3.2.py","file_name":"HW_3.2.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95196581","text":"# -*- coding=UTF-8 -*-\n# pyright: strict, reportTypeCommentUsage=false\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport wulifang\n\nTYPE_CHECKING = False\nif TYPE_CHECKING:\n from typing import Text\n from .._types import MessageService\n\n\nclass LoggingMessageService:\n def __init__(self):\n # type: () -> None\n logger = logging.getLogger(\"wulifang\")\n logger.setLevel(logging.DEBUG)\n self._logger = logger\n\n def debug(self, message, title=\"\"):\n # type: (Text, Text) -> None\n if not wulifang.is_debug:\n return\n if title:\n message = \"[%s] %s\" % (title, message)\n self._logger.debug(message)\n\n def info(self, message, title=\"\"):\n # type: (Text, Text) -> None\n if title:\n message = \"[%s] %s\" % (title, message)\n self._logger.info(message)\n\n def error(self, message, title=\"\"):\n # type: (Text, Text) -> None\n if title:\n message = \"[%s] %s\" % (title, message)\n self._logger.error(message)\n\n\ndef _(v):\n # type: (LoggingMessageService) -> MessageService\n return v\n","sub_path":"wulifang/infrastructure/logging_message_service.py","file_name":"logging_message_service.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"483315341","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy import signal\n\nfilename = input(\"filename.csv->\")\ndf = pd.read_csv(filename,header=None)\ndf = df.iloc[:,0]\n\n#sample_freq = 
int(int(1.0/(6204)*(10**6)) / 2) * 2\nsample_freq = 1.0/(6204)*(10**6)\nprint(sample_freq)\ndt = 1.0/sample_freq\nf1 = 0\nf2 = 30\n#t = np.arange(0, N*dt, dt)\n\n# filter_LPF = signal.firwin(numtaps=1000,cutoff=f2,fs=sample_freq,pass_zero=False)\nfilter_LPF = signal.firwin(numtaps=1000,cutoff=[f1,f2],fs=sample_freq,pass_zero=False)\n\ny1 = signal.lfilter(filter_LPF,1,df)\nFFT = np.fft.fft(y1)\ndf_FFT = np.fft.fft(df)\n\nfreq = np.fft.fftfreq(len(FFT),dt)\n\nplt.plot(filter_LPF, label=\"filter taps\")\nplt.legend()\nplt.show()\n\nplt.plot(y1, label=\"filtered\")\nplt.plot(df, label=\"original\")\nplt.legend()\nplt.show()\n\n#plt.plot(freq, FFT, label=\"filtered\")\n#plt.plot(freq, df_FFT, label=\"original\")\n#plt.legend()\n#plt.show()\n\namp_FFT=np.abs(FFT/(len(df)/2))\namp_df =np.abs(df_FFT/(len(df)/2))\n\nplt.plot(freq,amp_FFT, label=\"filtered\")\nplt.plot(freq,amp_df, label=\"original\")\nplt.legend()\nplt.show()\n","sub_path":"fft_Heart.py","file_name":"fft_Heart.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"340829529","text":"#!/usr/bin/env python\n# Prepared for DARPA AlphaDogfight Trials\n# Developed by JHU/APL, Aug 2019\nimport time\nimport logging\nimport logging.config\n#import yaml\n\nfrom adt import Configuration, Manager\n\nimport adt.agents.bud_fsm as red\nfrom SimCode import fastcube as blue\n\nimport sample_test as test\n\nimport numpy as np\nimport boto3\nimport pickle\nimport argparse\n\nfrom decimal import *\n\n\nclass Configurator:\n def __init__(self, logger):\n #LOAD UP ALL OUR IMPORTANT INFO\n #Initialize-------------------------------------------------------------------------------------------\n #Take in arguments.\n parser = argparse.ArgumentParser(description=\"Accepts Integers.\")\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer for the accumulator')\n args = parser.parse_args()\n trialNum = args.integers[0]\n iteration = args.integers[1]\n\n #Grab Table Entry\n db = boto3.resource(\"dynamodb\", region_name = 'us-west-1')\n table = db.Table(\"ADFEval\" + str(iteration))\n simInfo = table.get_item(\n \tKey={\n \t\t'TrialNum': trialNum,\n \t}\n )['Item']\n\n policyNum = simInfo['PolicyNum']\n confNum = simInfo['Configuration']\n\n #Grab Policy\n s3 = boto3.resource('s3', region_name = 'us-west-1')\n bucket = s3.Bucket('adfproject')\n directory_name = \"policy\" + str(iteration)\n with open('pdump', 'wb') as data:\n \tbucket.download_fileobj(directory_name + '/policy', data)\n with open('pdump', 'rb') as data:\n \tController = pickle.load(data)\n #Set to sampled policy.\n #This controller can be made a lot more light weight later.\n Controller.setGraph(int(policyNum))\n\n #Grab Configuration\n directory_name = \"config\" + str(iteration)\n bucket.download_file(directory_name + '/rc' + str(confNum) + '.xml', \"conf.xml\")\n\n #Initialization finished\n\n configuration = Configuration(\"conf.xml\")\n # config.write_configuration(\"sample_output_config.xml\")\n\n self.manager = Manager(logger, configuration)\n self.gym_env = self.manager.get_gym_env()\n self.red = red.Agent(self.gym_env.red_action_space, self.gym_env.red_observation_space)\n self.blue = blue.Agent(self.gym_env.blue_action_space, self.gym_env.blue_observation_space)\n self.blue.setpolicy(Controller)\n self.manager.set_red_agent(self.red)\n self.manager.set_blue_agent(self.blue)\n self.test = test.Sample(logger, self.manager)\n\n #Update Table\n sim_time = 
self.manager._red.state[self.manager._red.info['red_simulation_sim_time_sec']]/300\n getcontext().prec = 3\n reward = Decimal(self.manager._blue.reward) - Decimal(self.manager._red.reward)\n if(self.manager._red.reward <= -1):\n reward += Decimal(1 - sim_time)\n table.update_item(\n Key={\n \t\t'TrialNum': trialNum,\n },\n UpdateExpression=\"SET reward = :r\",\n ExpressionAttributeValues={\n ':r': reward,\n }\n )\n\n\n\n\nif __name__ == \"__main__\":\n# with open('logger_config.yaml', 'r') as f:\n# config = yaml.safe_load(f.read())\n# logfile = f'{config[\"handlers\"][\"file\"][\"filename\"]}-{time.strftime(\"%Y%m%d-%H%M%S\",time.gmtime(time.time()))}.log'\n# config[\"handlers\"][\"file\"][\"filename\"] = logfile\n# logging.config.dictConfig(config)\n\n logger = logging.getLogger(__name__)\n\n Configurator(logger)\n","sub_path":"Docker2/compete_fastdice.py","file_name":"compete_fastdice.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"382845645","text":"from django.conf.urls import url\nfrom . import views \n\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n # url(r'^(?P<id>\\d+)/edit$', views.edit, name=\"edit\"),\n url(r'^(?P<id>\\d+)/profile$', views.profile, name=\"profile\"),\n url(r'^(?P<id>\\d+)/addfave$', views.addfave, name = \"addfave\"),\n url(r'^(?P<id>\\d+)/unfave$', views.unfave, name = \"unfave\"), \n url(r'^add$', views.add, name=\"add\"),\n # url(r'^(?P<id>\\d+)/destroy$', views.destroy, name=\"destroy\"),\n # url(r'^(?P<id>\\d+)/update$', views.update, name=\"update\") \n ]","sub_path":"apps/belt/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"52793331","text":"import os\nimport re\nimport skimage.io\nimport skimage.color\nimport numpy as np\nimport argparse\n\n\ndef change_saturation(image, saturation):\n image_hsv = skimage.color.rgb2hsv(image)\n if (image_hsv[:, :, 1] == 0).all():\n return image\n hsv = image_hsv[:, :, 1].copy()\n hsv *= saturation\n hsv[hsv > 1] = 1\n image_hsv[:, :, 1] = hsv\n return skimage.color.hsv2rgb(image_hsv)\n\n\ndef augment(N, image_root):\n images = []\n root = os.path.join(image_root, 'train')\n for dirname, dirnames, filenames in os.walk(root):\n for filename in filenames:\n if re.match(\"[0-9]+\\\\.jpg\", filename):\n images.append(os.path.join(dirname, filename))\n\n categories = np.loadtxt(\"development_kit/data/categories.txt\", delimiter=\" \", dtype=str)\n fh = open(\"development_kit/data/train.txt\", \"w\")\n for image_path in sorted(images):\n print(image_path)\n path, name = os.path.split(image_path)\n cat = os.path.relpath(path, root)\n idx = int(categories[categories[:, 0] == \"/\" + cat, 1][0])\n\n image = skimage.io.imread(image_path)\n for saturation in np.linspace(0, 2, N):\n new_image_name = \"{}-{}.jpg\".format(os.path.splitext(name)[0], saturation)\n new_image_path = os.path.join(path, new_image_name)\n fh.write(\"{} {}\\n\".format(os.path.join(\"train\", cat, new_image_name), idx))\n if os.path.exists(new_image_path):\n try:\n skimage.io.imread(new_image_path)\n except:\n pass\n else:\n continue\n\n new_image = change_saturation(image, saturation)\n skimage.io.imsave(new_image_path, new_image)\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Train and evaluate a net on the MIT mini-places dataset.')\n parser.add_argument('--image_root', default='./images/',\n 
help='Directory where images are stored')\n parser.add_argument('--N', default=9, type=int,\n help='Number of new images to generate')\n args = parser.parse_args()\n \n augment(args.N, args.image_root)\n","sub_path":"augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"43725009","text":"import sys\n\ndef art(s, n):\n res = s\n g = \"G\" * len(s)\n for i in xrange(n-1):\n res = ''.join(s if c == 'L' else g for c in res)\n return res\n\nlines = open(sys.argv[1], \"rb\").read().splitlines()\nt = int(lines[0])\nres = []\n\nfor i in xrange(1, t + 1):\n k, c, s = map(int, lines[i].split())\n res.append(\"Case #%d: %s\\n\" % (i, ' '.join(map(str, xrange(1, k+1)))))\n\nopen(sys.argv[2], \"wb\").write(''.join(res))\n","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_royiarchy_d_answer.py","file_name":"16_0_4_royiarchy_d_answer.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"539612275","text":"import numpy as np\r\n\r\n# Load configurations\r\nscInformation = np.load(\"config/SC Information.npz\")\r\nnfInformation = np.load(\"config/NF Information.npz\")\r\nsnInformation = np.load(\"config/SN Information.npz\")\r\nsystemInformation = np.load(\"config/System Information.npz\")\r\n\r\n# System Information\r\nmaxTime = systemInformation['maxTime']\r\nVs = systemInformation['Vs']\r\nlenOfVs = len(Vs)\r\narrivals = systemInformation['arrivals'] # arrivals[c, t]\r\n\r\n# Network Information\r\nnumOfNF = nfInformation['numOfNF']\r\nprocessingCost = nfInformation['processingCost'] # processingCost[f]\r\n\r\n# Service Chain Information\r\nnumOfSC = scInformation['numOfSC']\r\nlengthOfSC = scInformation['lengthOfSC']\r\nserviceChains = scInformation['serviceChains'] # serviceChains[c, i]\r\n\r\n# Substrate Network Information (mainly about the servers)\r\nnumOfServer = snInformation['numOfServer']\r\nserverCapacities = snInformation['serverCapacities'] # serverCapacities[s]\r\nidleEnergies = snInformation['idleEnergies'] # idleEnergies[s]\r\nmaxEnergies = snInformation['maxEnergies'] # maxEnergies[s]\r\n\r\n# the observed states of **previous** time-slot, we maintain states for each parameter V\r\n\r\n# queueBacklogs[V][s, f, c] saves the queue backlogs of server s, VM f, type c.\r\n# (Notice that f here means both Network Functions and VMs on the server)\r\nqueueBacklogs = {V: np.zeros((numOfServer, numOfNF, numOfSC), dtype=int) for V in Vs}\r\n\r\n# VMStates[V][s, f] maintains the on-off states of the VM f on server s. 
\"True\" means \"on\" and \"False\" means \"off\".\r\n# VMStates = {V: np.zeros((numOfServer, numOfNF), dtype=bool) for V in Vs}\r\n\r\n# resourceAllocations[V][s, f, c] denotes how many resources is allocated to type c on VM f, on server s.\r\nresourceAllocations = {V: np.zeros((numOfServer, numOfNF, numOfSC), dtype=int) for V in Vs}\r\nactualServices = {V: np.zeros((numOfServer, numOfNF, numOfSC), dtype=int) for V in Vs}\r\n\r\n# placements[V][(c, f)] = s means the NF f of service chain c is placed on server s\r\nplacements = {V: {} for V in Vs}\r\n\r\navgQueueBacklogs = {V: np.zeros(maxTime) for V in Vs}\r\navgEnergyCosts = {V: np.zeros(maxTime) for V in Vs}\r\n\r\n\r\ndef VNFPlacement(V, queues):\r\n # print(\"VNFPlacement\")\r\n placement = {}\r\n # For each service chain type c, and one of its function f, we need to decide on which server to place it\r\n for c in range(numOfSC):\r\n for i in range(lengthOfSC):\r\n f = serviceChains[c][i]\r\n pair = tuple([c, f])\r\n queue = queues[:, f, c]\r\n chosenServer = np.argmin(queue)\r\n placement[pair] = chosenServer\r\n\r\n return placement\r\n\r\n\r\ndef ResourceAllocation(V, queues):\r\n # print(\"ResourceAllocation\")\r\n allocation = np.zeros((numOfServer, numOfNF, numOfSC))\r\n for s in range(numOfServer):\r\n term1 = V * (maxEnergies[s] - idleEnergies[s]) / float(serverCapacities[s])\r\n weights = term1 * np.ones((numOfNF, numOfSC))\r\n for f in range(numOfNF):\r\n for c in range(numOfSC):\r\n weights[f, c] -= queues[s, f, c] / float(processingCost[f])\r\n (chosenVM, chosenType) = np.unravel_index(weights.argmin(), weights.shape)\r\n if weights[chosenVM, chosenType] < 0:\r\n allocation[s, chosenVM, chosenType] = serverCapacities[s]\r\n\r\n return allocation\r\n\r\n\r\ndef QueueUpdate(V, queues, services, placement):\r\n # print(\"QueueUpdate\")\r\n for s in range(numOfServer):\r\n for f in range(numOfNF):\r\n for c in range(numOfSC):\r\n queues[s, f, c] -= services[s, f, c]\r\n if tuple([c, f]) in placement.keys() and placement[tuple([c, f])] == s:\r\n if f == serviceChains[c][0]:\r\n queues[s, f, c] += arrivals[c][t]\r\n else:\r\n chain = list(serviceChains[c, :])\r\n fPre = serviceChains[c][chain.index(f) - 1]\r\n for ss in range(numOfServer):\r\n queues[s, f, c] += services[ss, fPre, c]\r\n\r\n return queues\r\n\r\n\r\ndef ServiceUpdate(V, queues, allocation):\r\n # print(\"ServiceUpdate\")\r\n services = np.zeros((numOfServer, numOfNF, numOfSC))\r\n for s in range(numOfServer):\r\n for f in range(numOfNF):\r\n for c in range(numOfSC):\r\n services[s, f, c] = min(allocation[s, f, c] / processingCost[f], queues[s, f, c])\r\n\r\n return services\r\n\r\n\r\ndef VNFGreedy(t, V):\r\n '''\r\n :param t: current time-slot.\r\n :param V: the trade-off parameter of queue backlog and cost\r\n :return: the total queue backlogs and total energy cost incurred in this time-slot\r\n '''\r\n global queueBacklogs, VMStates, resourceAllocations, placements\r\n\r\n # Part 1: calculate placements\r\n placements[V] = VNFPlacement(V, queueBacklogs[V])\r\n\r\n queueBacklogs[V] = QueueUpdate(V, queueBacklogs[V], actualServices[V], placements[V])\r\n\r\n resourceAllocations[V] = ResourceAllocation(V, queueBacklogs[V])\r\n\r\n actualServices[V] = ServiceUpdate(V, queueBacklogs[V], resourceAllocations[V])\r\n\r\n\r\ndef calculateAvgQueueBacklog(V):\r\n queues = queueBacklogs[V]\r\n total = 0\r\n for s in range(numOfServer):\r\n for f in range(numOfNF):\r\n for c in range(numOfSC):\r\n total += queues[s, f, c]\r\n\r\n # return total/float(numOfServer * numOfNF 
* numOfSC)\r\n return total\r\n\r\n\r\ndef calculateAvgEnergyCost(V):\r\n services = actualServices[V]\r\n total = 0\r\n for s in range(numOfServer):\r\n for f in range(numOfNF):\r\n for c in range(numOfSC):\r\n total += services[s, f, c] * processingCost[f]\r\n\r\n # return total/float(numOfServer * numOfNF * numOfSC)\r\n return total\r\n\r\nif __name__ == \"__main__\":\r\n for V in Vs:\r\n for t in range(maxTime):\r\n print(\"Now V is\", V, \" and time slot is\", t)\r\n VNFGreedy(t, V)\r\n avgQueueBacklogs[V][t] = calculateAvgQueueBacklog(V)\r\n avgEnergyCosts[V][t] = calculateAvgEnergyCost(V)\r\n\r\n avgQueueBacklogsNew = np.zeros((lenOfVs, maxTime))\r\n avgEnergyCostsNew = np.zeros((lenOfVs, maxTime))\r\n for i in range(lenOfVs):\r\n avgQueueBacklogsNew[i, :] = np.array(avgQueueBacklogs[Vs[i]])\r\n avgEnergyCostsNew[i, :] = np.array(avgEnergyCosts[Vs[i]])\r\n\r\n np.savez(\"results/avgQueueBacklogs.npz\", avgQueueBacklogs=avgQueueBacklogsNew)\r\n np.savez(\"results/avgEnergyCosts.npz\", avgEnergyCosts=avgEnergyCostsNew)\r\n print(\"main\")","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"647464134","text":"\nimport numpy as np\nimport scipy.optimize as opt\nimport time\n\ndef rosen(x):\n\treturn sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)\n\ndef rosen_der(x):\n\txm = x[1:-1]\n\txm_m1 = x[:-2]\n\txm_p1 = x[2:]\n\tder = np.zeros_like(x)\n\tder[1:-1] = 200 * (xm - xm_m1 ** 2) - 400 * (xm_p1 - xm ** 2) * xm - 2 * (1 - xm)\n\tder[0] = -400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0])\n\tder[-1] = 200 * (x[-1] - x[-2] ** 2)\n\treturn der\n\ndef rosen_hess(x):\n\tx = np.asarray(x)\n\tH = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)\n\tdiagonal = np.zeros_like(x)\n\tdiagonal[0] = 1200 * x[0] ** 2 - 400 * x[1] + 2\n\tdiagonal[-1] = 200\n\tdiagonal[1:-1] = 202 + 1200 * x[1:-1] ** 2 - 400 * x[2:]\n\tH = H + np.diag(diagonal)\n\treturn H\n\ndef execTestLinearAlgebra(n):\n\tt1 = time.time()\n\ta = np.random.random((n, n))\n\tb = np.random.random((n, n))\n\tadd = np.add(a, b)\n\tsub = np.subtract(a, b)\n\tmul = np.multiply(a, b)\n\tdiv = np.divide(a, b)\n\tdot = np.dot(a, b)\n\tmed = np.median(a, axis = 1)\n\taT = np.transpose(a)\n\taInv = np.linalg.inv(a)\n\taEig = np.linalg.eig(a)\n\tU, s, V = np.linalg.svd(a, full_matrices=True)\n\tt2 = time.time()\n\treturn t2 - t1\n\ndef execTestOptimization(n):\n\tt1 = time.time()\n\tx0 = 10 * np.random.random((1, n))\n\tres = opt.minimize(rosen, x0, method = 'nelder-mead', options = {'xtol': 1e-8, 'disp': False})\n\tres = opt.minimize(rosen, x0, method = 'BFGS', jac = rosen_der, options = {'disp': False})\n\tres = opt.minimize(rosen, x0, method='Newton-CG', jac=rosen_der, hess=rosen_hess, options={'xtol': 1e-8, 'disp': False})\n\tt2 = time.time()\n\treturn t2 - t1\n\ndef getExecTest(pow = 3):\n\tLAtime = []\n\tOptTime = []\n\tfor i in np.arange(1, pow):\n\t\tn = np.power(10, i)\n\t\tLAtime.append(execTestLinearAlgebra(n))\n\t\tOptTime.append(execTestOptimization(n))\n\treturn LAtime, OptTime\n\nif __name__ == '__main__':\n\tLAtime, OptTime = getExecTest()\n\tprint('Time (Linear Algebra): ', LAtime)\n\tprint('Time (Optimization): ', OptTime)\t\n","sub_path":"pythonBenchmark.py","file_name":"pythonBenchmark.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"418747525","text":"from 
room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\nclass Queue():\n def __init__(self):\n self.queue = []\n def enqueue(self, value):\n self.queue.append(value)\n def dequeue(self):\n if self.size() > 0:\n return self.queue.pop(0)\n else:\n return None\n def size(self):\n return (len(self.queue))\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\ntraversal_path = []\ngraph = {}\n\nprint(player.current_room.id)\nprint(player.current_room.get_exits())\n\ndirections = ('n', 's', 'e', 'w')\n\ninverseDirections = {'n': 's', 's': 'n', 'w': 'e', 'e': 'w'}\ndef traverseMap(player, direction = ''):\n \n # Check if all rooms have been explored (main_maze has 500 rooms) and stop if they have.\n if len(graph.keys()) == 500:\n return\n # While the map is not completely explored\n # If the room doesn't exist\n\n currentRoom = player.current_room.id\n\n if player.current_room.id not in graph:\n # Initialize in your room graph with '?' exits\n graph[player.current_room.id] = {}\n for exit in player.current_room.get_exits():\n graph[player.current_room.id][exit] = '?'\n\n # If coming from another room\n if direction != '':\n # find opposite direction of current travel\n opposite = inverseDirections[direction]\n # set prevRoom using Room method 'getRoomInDirection'\n prevRoom = player.current_room.get_room_in_direction(opposite)\n # Update the graph entry for the previous room\n graph[currentRoom][opposite] = prevRoom.id\n\n new_direction = '?'\n\n # If there is an unexplored exit in the current room (i.e. a '?' exit), travel in that direction\n for exit in player.current_room.get_exits():\n if graph[currentRoom][exit] == '?':\n # if the current room has an unexplored exit set the new_direction to that exit\n new_direction = exit\n # travel there and append the current exit to the traversal path\n player.travel(exit)\n traversal_path.append(exit)\n # set new_room to the player's current room and set the previous room's exit to the new room\n new_room = player.current_room.id\n graph[currentRoom][exit] = new_room\n # Walk there\n traverseMap(player, exit)\n break\n\n # Else, find the nearest room using BFS with an unexplored exit and travel there\n # Set a travel_path\n travel_path = []\n\n if new_direction == '?':\n # Setup a new Queue with the currentRoom\n q = Queue()\n visited = set()\n q.enqueue([currentRoom])\n\n while q.size() > 0:\n # While there is something in the Queue take out the last item and set current room to the last item in path\n path = q.dequeue()\n currentRoom = path[-1]\n\n if currentRoom not in visited:\n visited.add(currentRoom)\n\n # If currentRoom has an unexplored exit\n if '?' 
in graph[currentRoom].values():\n # Return path to that room and reset the queue\n travel_path = path\n q = Queue()\n break\n\n for neighbor in graph[currentRoom].values():\n # for every direction in the current room add it to the path to search through and add it to the queue\n new_path = list(path)\n new_path.append(neighbor)\n q.enqueue(new_path)\n\n for r in travel_path:\n # for every room in the travel path\n room = player.current_room.id\n g_keys = graph[room].keys()\n for d in g_keys:\n # For every room we walked along add the values that match to that room to our traversal path\n if graph[room][d] == r:\n player.travel(d)\n traversal_path.append(d)\n\n # Explore the map again now that we are at a room with an unexplored exit\n traverseMap(player)\n\ntraverseMap(player)\n\n\n# print(graph)\n# print(traversal_path)\n# print(\"\\n*****\")\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\nplayer.current_room.print_room_description(player)\nwhile True:\n cmds = input(\"-> \").lower().split(\" \")\n if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n player.travel(cmds[0], True)\n elif cmds[0] == \"q\":\n break\n else:\n print(\"I did not understand that command.\")\n","sub_path":"projects/adventure/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"530848808","text":"__author__ = 'sfesly'\nfrom sklearn import tree\nimport sklearn\ndef demo_basic():\n X = [[0, 0], [1,1]]\n Y = [0, 1]\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(X, Y)\n print(clf.predict([[2.,2.], [3.,4.]]))\n print(clf.predict_proba([[2.,2.], [3.,4.]]))\ndef demo_Iris():\n from sklearn.datasets import load_iris\n from sklearn import tree\n iris = load_iris()\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(iris.data, iris.target)\n\n from sklearn.externals.six import StringIO\n import pydot\n dot_data = StringIO()\n tree.export_graphviz(clf, out_file=dot_data)\n graph = pydot.graph_from_dot_data(dot_data.getvalue())\n graph.write_pdf('iris.pdf')\ndef demo_regression():\n print(__doc__)\n\n # Import the necessary modules and libraries\n import numpy as np\n from sklearn.tree import DecisionTreeRegressor\n import matplotlib.pyplot as plt\n\n # Create a random dataset\n rng = np.random.RandomState(1)\n X = np.sort(5 * rng.rand(80, 1), axis=0)\n y = np.sin(X).ravel()\n y[::5] += 3 * (0.5 - rng.rand(16))\n\n # Fit regression model\n clf_1 = DecisionTreeRegressor(max_depth=2)\n clf_2 = DecisionTreeRegressor(max_depth=5)\n clf_1.fit(X, y)\n clf_2.fit(X, y)\n\n # Predict\n X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]\n y_1 = clf_1.predict(X_test)\n y_2 = clf_2.predict(X_test)\n\n # Plot the results\n plt.figure()\n plt.scatter(X, y, c=\"k\", label=\"data\")\n plt.plot(X_test, y_1, c=\"g\", label=\"max_depth=2\", linewidth=2)\n plt.plot(X_test, y_2, c=\"r\", label=\"max_depth=5\", linewidth=2)\n plt.xlabel(\"data\")\n plt.ylabel(\"target\")\n plt.title(\"Decision Tree Regression\")\n plt.legend()\n plt.show()\n\nif 
__name__ == \"__main__\":\n demo_basic()","sub_path":"machine learning/scikit demo/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"149806833","text":"#!/usr/bin/env python\n\n# Script for parsing prometheus metrics format and send it into zabbix server\n# MIT License\n# https://github.com/Friz-zy/telegraf-monitoring-agent-setup\n\nimport re\nimport os\nimport sys\nimport time\nimport json\nimport socket\nimport optparse\ntry:\n from urllib.request import urlopen\nexcept:\n from urllib import urlopen\n\nMETRICS = {\n 'default': {\n 'sort_labels': ['name', 'id', 'host', 'path', 'device', 'source', 'cpu'],\n },\n 'docker_container_': {\n 'sort_labels': ['host', 'source', 'device', 'cpu'],\n },\n}\n\ndef parse(source='http://127.0.0.1:9273/metrics'):\n # https://prometheus.io/docs/practices/naming/\n # https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels\n regex = re.compile(r'^(?P[a-zA-Z_:][a-zA-Z0-9_:]*)(?P{.*})?\\s+(?P.+)(\\s+(?P\\w+))?$')\n help_line = ''\n type_line = ''\n metrics = []\n\n text = urlopen(source).read()\n\n for line in text.splitlines():\n line = line.decode(\"utf-8\")\n\n if line[0:6] == '# HELP':\n help_line = line\n continue\n elif line[0:6] == '# TYPE':\n type_line = line\n continue\n elif line[0] == '#':\n continue\n\n metric = regex.match(line).groupdict()\n metric['line_raw'] = line\n metric['help'] = help_line\n metric['type'] = type_line\n metric['source'] = source\n metrics.append(metric)\n\n return metrics\n\ndef main():\n parser = optparse.OptionParser()\n source = 'http://127.0.0.1:9273/metrics'\n destination = '/tmp/prom2zabbix'\n parser.set_defaults(source=source,\n destination=destination,\n hostname='')\n parser.add_option(\"-s\", \"--source\", dest=\"source\",\n help=\"Prometheus source, default is \" + source)\n parser.add_option(\"-d\", \"--destination\", dest=\"destination\",\n help=\"Output .keys and .metrics files pattern, default is \" + destination)\n (options, args) = parser.parse_args()\n\n seconds = int(time.time())\n metrics = parse(options.source)\n\n data = {\"data\": []}\n keys = {}\n\n # fill and prepare metric\n for metric in metrics:\n if not metric['timestamp']:\n metric['timestamp'] = seconds\n if not metric['labels']:\n metric['labels'] = '{}'\n else:\n # limit lenght of metric because of zabbix limit\n # for graph name even 132 char is too long\n if len(metric['metric']) + len(metric['labels']) > 200:\n metric['original_labels'] = metric['labels'].replace(',', ';')\n short_labels = []\n for label in metric['labels'].lstrip('{').rstrip('}').split(','):\n for key in METRICS.keys():\n if key in metric['metric'] and key != 'default':\n for l in METRICS[key]['sort_labels']:\n if l in label:\n short_labels.append(label)\n break\n metric['labels'] = '{' + ';'.join(short_labels) + '}'\n else:\n metric['labels'] = metric['labels'].replace(',', ';')\n\n # hacks\n if metric['metric'] == 'procstat_created_at':\n metric['value'] = metric['value'].replace('e+18', 'e+09')\n\n m = {}\n for k, v in metric.items():\n m[\"{#%s}\" % k.upper()] = v\n data[\"data\"].append(m)\n\n # addition for metric labels macro\n if metric['metric'] not in keys:\n keys[metric['metric']] = {\"data\": []}\n keys[metric['metric']][\"data\"].append({\n \"{#LABELS}\": metric['labels']})\n\n # write metrics\n with open(options.destination + '.metrics', 'w') as f:\n for metric in metrics:\n # 
https://www.zabbix.com/documentation/3.0/manpages/zabbix_sender\n escaped_labels = metric['labels'].replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n f.write('- \"telegraf[%s,%s]\" %s %s\\n' % (\n metric['metric'],\n escaped_labels,\n metric['timestamp'],\n metric['value']))\n\n # write keys\n with open(options.destination + '.keys', 'w') as f:\n for metric in keys:\n f.write('- \"telegraf[keys, %s]\" %s \"%s\"\\n' % (\n metric,\n seconds,\n json.dumps(keys[metric]\n ).replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')))\n data = json.dumps(data)\n escaped_data = data.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n f.write('- \"telegraf[keys]\" %s \"%s\"\\n' % (\n seconds,\n escaped_data))\n\n # print(data)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"zabbix/prom2zabbix.py","file_name":"prom2zabbix.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"311533619","text":"#!/usr/bin/env python\n\"\"\"\nMaxwellian shaped particle precipitation differential number flux\n\"\"\"\nimport ncarglow as glow\nimport ncarglow.plots as plot\nfrom datetime import datetime\nfrom matplotlib.pyplot import show\n\n\ntime = datetime(2015, 12, 13, 10, 0, 0)\nglat = 65.1\nglon = -147.5\n# %% flux [erg cm-2 s-1 == mW m-2 s-1]\nQ = 1\n# %% characteristic energy [eV]\nEchar = 100e3\n# %% Number of energy bins\nNbins = 250\n\niono = glow.maxwellian(time, glat, glon, Q, Echar, Nbins)\n# %% plots\nplot.precip(iono[\"precip\"])\nplot.ver(iono)\nplot.density(iono)\nplot.temperature(iono)\n\nshow()\n","sub_path":"Examples/Maxwellian.py","file_name":"Maxwellian.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"596740599","text":"import logging\nfrom json import JSONDecoder\n\nfrom app.models import common\nfrom app.models import composite\nfrom app.common import convert_json_serializable_list\nfrom flask import request\nfrom flask_restful import Resource\n\n\nclass Test(Resource):\n def get(self):\n print(\"test\")\n return {\"result\": \"success\"}, 200\n\n def post(self):\n _data = request.data.decode(encoding='UTF-8')\n _json = JSONDecoder().decode(_data)\n return {\"result\": \"success\"}, 200\n\n\nclass Team(Resource):\n def get(self, team_id):\n \"\"\"\n Return the information matching the team id\n :param team_id: Team.id\n :return:\n \"\"\"\n if len(team_id) < 6:\n return \"ID form is illegal.\", 404\n _session_id = int(team_id[:-5])\n _team_digit = int(team_id[-5:])\n _team = common.Team.search_by_session_team_id(_session_id, _team_digit)\n\n if _team is None:\n logging.info(\"TEAM does not exist. \"\n \"Session is {0}, digit is {1}\"\n .format(_session_id, _team_digit))\n return \"No Such session or team id\", 404\n # member names are synced after the game ends, so they are not needed here\n # _members = primitives.Member.get_members_as_string(_team.id)\n # if not _members:\n # logging.info(\"Member is not exist. 
id is \" + str(team_id))\n # return None, 404\n # _team.members = _members\n\n _game_c = common.Game.\\\n query.\\\n filter(common.Game.session == _session_id,\n common.Game.date == common.Game.get_current_date()).\\\n one()\n\n _result = dict(game=_game_c.to_json(), team=_team.to_json())\n # logging.info(_result)\n return _result, 200\n\n\nclass Teams(Resource):\n def post(self):\n \"\"\"\n form-data 의 형식으로 온 생성정보를 받아서 게임과 팀을 생성한다.\n [{\"job\":\"curator\", \"count\":30},\n {\"job\":\"archivist\", \"count\":30},\n {\"job\":\"educator\", \"count\":30}]\n :return:\n \"\"\"\n _data = request.get_json(force=True)\n _spec = _data.get(\"spec\")\n _result = composite.GameTeam.create_many(_spec)\n return _result, 200\n\n\nclass ResultTeam(Resource):\n def get(self, team_id):\n \"\"\"\n 팀의 id 에 매칭되는 정보를 반환한다\n :param team_id: Team.id\n :return:\n \"\"\"\n if len(team_id) < 6:\n return \"ID form is illegal.\", 404\n _session_id = int(team_id[:-5])\n _team_digit = int(team_id[-5:])\n _team = common.Team.search_by_session_team_id(_session_id, _team_digit)\n\n if _team is None:\n logging.info(\"TEAM is not exist. \"\n \"Session is {0}, digit is {1}\"\n .format(_session_id, _team_digit))\n return \"No Such session or team id\", 404\n # 멤버들의 이름은 게임이 끝나고 난 후에 sync 되기 때문에 이곳에서는 필요가 없다\n # _members = primitives.Member.get_members_as_string(_team.id)\n # if not _members:\n # logging.info(\"Member is not exist. id is \" + str(team_id))\n # return None, 404\n # _team.members = _members\n\n _game_c = common.Game.search_by_session(_session_id)\n _identity_c = common.Team.get_identity_by_job_and_identity_id(_team.job, _team.identity_id)\n\n _result = dict(game=_game_c.to_json(), team=_team.to_json(), identity=_identity_c.to_json())\n # logging.info(_result)\n return _result, 200\n\nclass Member(Resource):\n def post(self, team_id: str, members: str):\n \"\"\"\n :param team_id: Team.id\n :param members: \"FRED,DAVID,MIKE\"\n :return:\n \"\"\"\n if len(team_id) < 6:\n return \"ID form is illegal.\", 404\n _session_id = int(team_id[:-5])\n _team_digit = int(team_id[-5:])\n\n common.Team.update_members(_session_id, _team_digit, members)\n common.Member.create_many_from_string(team_id, members)\n return {\"result\": \"success\"}, 200\n\n\nclass Location(Resource):\n def post(self, game_id: int, team_id: int, location_id: int):\n \"\"\"\n :param team_id: Team.id\n :param location_id: Location.id\n :return:\n \"\"\"\n common.Position.create(game_id, team_id, location_id)\n return {\"result\": \"success\"}, 200\n\n\nclass TrackGame(Resource):\n def get(self, game_id: int):\n _positions_c = common.Position.search_by_game_id(game_id)\n _locations_c = common.Location.get_all()\n _result = dict(positions=convert_json_serializable_list(_positions_c),\n locations=convert_json_serializable_list(_locations_c))\n return _result, 200\n\n\nclass TrackGameJob(Resource):\n def get(self, game_id: int, job_families: str):\n _positions_c = common.Position.search_by_game_id_and_job_family(game_id, job_families)\n _locations_c = common.Location.get_all()\n _result = dict(positions=convert_json_serializable_list(_positions_c),\n locations=convert_json_serializable_list(_locations_c))\n return _result, 200\n\n\nclass TrackGameTeam(Resource):\n def get(self, game_id: int, team_id: int):\n _positions_c = common.Position.search_by_game_and_team_id(game_id, team_id)\n _locations_c = common.Location.get_all()\n _result = dict(positions=convert_json_serializable_list(_positions_c),\n locations=convert_json_serializable_list(_locations_c))\n 
return _result, 200\n","sub_path":"app/http_handler/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"511937076","text":"from django.views import generic\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom .models import Book, Author, BorrowRequest\n\n# home page at root URL\ndef index(request):\n latest_books = Book.objects.all()[:3]\n context = {'latest_books': latest_books}\n return render(request, 'good_library/index.html', context)\n\nclass BookListView(generic.ListView):\n template_name = 'good_library/book_list.html'\n context_object_name = 'book_list'\n \n def get_queryset(self):\n return Book.objects.all()\n\n# used by filters on book list page\nclass BookListFilterView(generic.ListView):\n template_name = 'good_library/book_list.html'\n context_object_name = 'book_list'\n def get_queryset(self):\n if (self.kwargs['book_filter'] == \"my\" and self.request.user.is_authenticated()):\n return Book.objects.filter(owner=self.request.user)\n else:\n return Book.objects.all()\n\nclass BookDetailView(generic.DetailView):\n model = Book\n\nclass AuthorListView(generic.ListView):\n model = Author\n\nclass AuthorDetailView(generic.DetailView):\n model = Author\n\n# return two lists of BorrowRequests:\n# 1. list of requests logged in user has issued for other users' books - borrowrequest_list\n# 2. list of requests other users created for logged in users' books - others_request_list\nclass BorrowRequestListView(generic.ListView):\n \n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(BorrowRequestListView, self).get_context_data(**kwargs)\n # Add in a QuerySet of others_request_list\n # use doble underscore notation (__), as per:\n # https://docs.djangoproject.com/en/1.7/topics/db/queries/#lookups-that-span-relationships\n context['others_request_list'] = BorrowRequest.objects.filter(book__owner=self.request.user)\n return context\n \n # get only BorrowRequests the logged in user requested\n # this is placed in default context object - borrowrequest_list\n def get_queryset(self):\n return BorrowRequest.objects.filter(borrower=self.request.user)\n\ndef borrow_request_create(request):\n book_pk = request.POST['book_pk']\n book = get_object_or_404(Book, pk=book_pk)\n \n # first check if user is authenticated\n if (request.user.is_authenticated()):\n # check if this is a duplicate request\n existingRequest = BorrowRequest.objects.filter(book=book, borrower=request.user, status='pending')\n if existingRequest:\n messages.info(request, \"You have already created request for this book!\")\n # user is logged in and request is not a duplicate\n else:\n # create borrow request\n borrowRequest = BorrowRequest(book=book, borrower=request.user, status='pending')\n borrowRequest.save()\n # notify user that request is sent\n messages.info(request, \"Request is sent to \" + unicode(book.owner) + \".\")\n\n # if user is not authenticated render a message to user\n else:\n messages.info(request, \"Log in first!\")\n\n return render(request, 'good_library/book_detail.html', {\n 'book': book,\n })\n\n# change status of pending borrow request\ndef borrow_request_update(request):\n # get borrow request\n borrow_request = get_object_or_404(BorrowRequest, pk=request.POST['borrow_request_pk'])\n new_status = 
request.POST['borrow_request_status']\n\n # users can only change status of pending requests\n if (borrow_request.status != \"pending\"):\n messages.info(request, \"You can only change status of pending requests!\")\n # pending status can be updated to accepted or rejected\n elif (new_status == \"accepted\" or new_status == \"rejected\"):\n borrow_request.status = new_status\n borrow_request.save()\n messages.info(request, \"Request status updated!\")\n # update book borrower\n book = borrow_request.book\n book.borrower = borrow_request.borrower\n book.save()\n # malicious user\n else:\n messages.info(request, \"Illegal status submitted.\")\n\n # redirect to BorrowRequestListView which already handles request list view\n return redirect(\"borrow_request_list\")\n\n","sub_path":"good_library/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"233500950","text":"# https://github.com/ethereum/wiki/wiki/JSON-RPC#json-rpc-api-reference\n\n# the methods are all defined here, check em out if you forget\n\nimport requests\nimport json\n\nfrom web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider\n\n\n\n# change the node port to something else later\n\nNODE_PORT = 22001 \n\nPRIVATE_ABI = '[{\"constant\":true,\"inputs\":[],\"name\":\"storedData\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"set\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"get\",\"outputs\":[{\"name\":\"retVal\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"inputs\":[{\"name\":\"initVal\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"constructor\"}]'\n\nFIRST_TX = '0x58d58bf5b1bfc3ce5270953d4ff3c1067177390d86ac8ba974a1f6f286859473'\n\nw3 = Web3(HTTPProvider(f'http://localhost:{NODE_PORT}'))\n\n\ntx = w3.geth.admin.peers()[0]['enode']\nprint(tx)\n\n\n# print([json.dumps(w3.geth.admin.peers()], indent=4, sort_keys=True))\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\n# convert from hex to int\ndef hexdec(hex): \n\treturn int(hex, 16)\n\n# the name of the rpc method in a string\n# the port is the rpc port. 
just the ending not the full url\ndef rpc_method(method, params, port, cleanhex=False, pretty_json=False):\n\n\turl = f'http://localhost:{port}'\n\n\tpayload = {\n\t\t\"method\": method,\n\t\t\"params\" : params,\n\t\t\"id\": 67,\n\t\t\"jsonrpc\": \"2.0\"\n\t}\n\n\n\tresponse = requests.post(url, json=payload).json()\n\n\ttry:\n\n\t\tresult = response['result']\n\n\t\tif cleanhex == True:\n\t\t\tdec = hexdec(result) \n\t\t\tprint(f'{bcolors.OKGREEN}{method} {bcolors.ENDC} : {dec}')\n\n\t\telif pretty_json == True:\n\t\t\tprint(f'{bcolors.OKGREEN}{method} {bcolors.ENDC} : ')\n\t\t\tprint(json.dumps(result, indent=4, sort_keys=True))\n\n\t\telse: \n\t\t\tprint(f'{bcolors.OKGREEN}{method} {bcolors.ENDC} : {result}')\n\n\t\treturn result \n\texcept:\n\t\tprint(f'{bcolors.FAIL}{response}{bcolors.ENDC}')\n\n\nrpc_method('web3_clientVersion', [], NODE_PORT)\nrpc_method('eth_syncing', [], NODE_PORT)\nrpc_method('net_version', [], NODE_PORT)\nrpc_method('net_listening', [], NODE_PORT)\nrpc_method('net_peerCount', [], NODE_PORT, True)\nrpc_method('eth_protocolVersion', [], NODE_PORT, True)\nrpc_method('eth_coinbase', [], NODE_PORT)\nrpc_method('eth_mining', [], NODE_PORT)\nrpc_method('eth_hashrate', [], NODE_PORT)\nrpc_method('eth_gasPrice', [], NODE_PORT)\n\nfirst_acc = rpc_method('eth_accounts', [], NODE_PORT)\nrpc_method('eth_blockNumber', [], NODE_PORT)\nrpc_method('eth_getBalance', [first_acc[0], 'latest'], NODE_PORT, True)\nrpc_method('eth_getTransactionCount', [first_acc[0], 'latest'], NODE_PORT, True)\n\nrpc_method('eth_getTransactionByHash', [FIRST_TX], NODE_PORT, False, True)\ncontract_addr = rpc_method('eth_getTransactionReceipt', [FIRST_TX], NODE_PORT, False, True)['contractAddress']\n\n# needed for the thing, otherwise it complains\nchecksum_contract_addr = Web3.toChecksumAddress(contract_addr)\n\nprint(f'{bcolors.WARNING}contract address is : {contract_addr} {bcolors.ENDC}')\n\n# Let's try to call the Get on the contract\n\n# print(Web3.isChecksumAddress(checksum_contract_addr))\n\nprivate_contract = w3.eth.contract(address=checksum_contract_addr, abi=PRIVATE_ABI)\n\ntry:\n\tfinal_value = private_contract.functions.get().call()\n\tprint(f'{bcolors.WARNING}private value is : {final_value} {bcolors.ENDC}')\nexcept Exception as e:\n\tprint(f'{bcolors.FAIL} {e} {bcolors.ENDC}')\n\n\n\n\n# private contract address\n# 0x58d58bf5b1bfc3ce5270953d4ff3c1067177390d86ac8ba974a1f6f286859473\n\n\n\n\n\n\n\n\n","sub_path":"quorum/scripts/node_info.py","file_name":"node_info.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"233820643","text":"# Basic training configuration file\nfrom pathlib import Path\nfrom torch.optim import SGD\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR\nfrom torch.utils.data import ConcatDataset\nfrom torchvision.transforms import RandomHorizontalFlip, RandomVerticalFlip\nfrom torchvision.transforms import RandomResizedCrop\nfrom torchvision.transforms import ColorJitter, ToTensor, Normalize\n\nfrom common.dataset import FilesFromCsvDataset\nfrom common.data_loaders import get_data_loader, get_trainval_indices\nfrom models.nasnet_a_large import FurnitureNASNetALarge350\n\n\nSEED = 1245\nDEBUG = True\nDEVICE = \"cuda\"\n\nOUTPUT_PATH = Path(\"output\") / \"cv\" / \"nasnetlarge_350_resized_crop\"\n\nsize = 350\n\nTRAIN_TRANSFORMS = [\n RandomResizedCrop(size, scale=(0.6, 1.0), interpolation=3),\n RandomVerticalFlip(p=0.5),\n RandomHorizontalFlip(p=0.5),\n 
ColorJitter(hue=0.12, brightness=0.12),\n ToTensor(),\n Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n]\n\nVAL_TRANSFORMS = [\n RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),\n RandomHorizontalFlip(p=0.5),\n ToTensor(),\n Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n]\n\n\nbatch_size = 8\nnum_workers = 15\n\n\ntrain_dataset = FilesFromCsvDataset(\"output/unique_filtered_train_dataset.csv\")\nval_dataset = FilesFromCsvDataset(\"output/unique_filtered_val_dataset.csv\")\ntrainval_dataset = ConcatDataset([train_dataset, val_dataset])\n\n\n# #### Stratified split :\nfold_index = 3\nn_splits = 4\ntrain_index, val_index = get_trainval_indices(trainval_dataset,\n fold_index=fold_index, n_splits=n_splits,\n xy_transforms=None,\n batch_size=batch_size, n_workers=8,\n seed=SEED)\n# ####\n\n\nTRAIN_LOADER = get_data_loader(trainval_dataset,\n data_transform=TRAIN_TRANSFORMS,\n sample_indices=train_index,\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=\"cuda\" in DEVICE)\n\n\nVAL_LOADER = get_data_loader(trainval_dataset,\n data_transform=VAL_TRANSFORMS,\n sample_indices=val_index,\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=\"cuda\" in DEVICE)\n\n\nMODEL = FurnitureNASNetALarge350(pretrained='imagenet')\n\n\nN_EPOCHS = 10\n\n\nOPTIM = SGD(\n params=[\n {\"params\": MODEL.stem.parameters(), 'lr': 0.0001},\n {\"params\": MODEL.features.parameters(), 'lr': 0.01},\n {\"params\": MODEL.classifier.parameters(), 'lr': 0.1},\n ],\n momentum=0.9)\n\n\nLR_SCHEDULERS = [\n MultiStepLR(OPTIM, milestones=[2, 4, 6, 8, 10, 12], gamma=0.22)\n]\n\n\nREDUCE_LR_ON_PLATEAU = ReduceLROnPlateau(OPTIM, mode='min', factor=0.5, patience=3, threshold=0.08, verbose=True)\n\nEARLY_STOPPING_KWARGS = {\n 'patience': 15,\n # 'score_function': None\n}\n\nLOG_INTERVAL = 100\n","sub_path":"classification/imaterialist_challenge_furniture_2018/configs/cv/nasnetlarge_350_resized_crop/fold_3.py","file_name":"fold_3.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"208847619","text":"#!/usr/bin/env python\n# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016-2017 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\nfrom __future__ import absolute_import, division, unicode_literals\n\n__authors__ = ['Marius Retegan']\n__license__ = 'MIT'\n__date__ = '10/04/2017'\n\nimport crispy\nimport os\nimport silx\nimport sys\nfrom cx_Freeze import setup, Executable\n\n\ndef get_version():\n import version\n return version.strictversion\n\npackages = ['matplotlib', 'PyQt5.QtPrintSupport']\nincludes = []\nexcludes = ['tkinter']\n\nmodules = [crispy, silx]\nmodules_path = [os.path.dirname(module.__file__) for module in modules]\ninclude_files = [(module, os.path.basename(module)) for module in modules_path]\n\noptions = {\n 'build_exe': {\n 'packages': packages,\n 'includes': includes,\n 'excludes': excludes,\n 'include_files': include_files,\n 'include_msvcr': True,\n },\n }\n\nbase = None\nif sys.platform == 'win32':\n base = 'Win32GUI'\n\nexecutables = [\n Executable(\n 'scripts/crispy',\n base=base,\n icon='icons/crispy.ico',\n ),\n ]\n\n\ndef main():\n setup(name='crispy',\n version=get_version(),\n options=options,\n executables=executables)\n\nif __name__ == '__main__':\n main()\n","sub_path":"setup_cx_freeze.py","file_name":"setup_cx_freeze.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"63795988","text":"import base64, requests, json\nfrom db_operator.item_db import ItemDb\n\n\ndef temp_AIR(image):\n url = 'http://api.tianapi.com/txapi/imglajifenlei/index'\n key = '6fda4f8543d58907405dc57896532b64'\n headers = 'Content-Type: application/x-www-form-urlencoded'\n post_image = str(base64.b64encode(image), encoding='utf-8')\n datas = {\n 'key': key,\n 'img': post_image\n }\n res = requests.post(url=url, data=datas).text\n # res = '{\"code\":200,\"msg\":\"success\",\"newslist\":[{\"name\":\"鼠标\",\"trust\":89,\"lajitype\":0,\"lajitip\":\"鼠标是可回收垃圾,常见包括各类废金属、玻璃瓶、饮料瓶、电子产品等。投放时应注意轻投轻放、清洁干燥、避免污染。\"},{\"name\":\"摩托车\",\"trust\":57,\"lajitype\":4,\"lajitip\":\"摩托车的垃圾分类系统暂时无法判别,请重新尝试拍摄物体的主要特征。\"},{\"name\":\"电脑外设\",\"trust\":38,\"lajitype\":4,\"lajitip\":\"电脑外设的垃圾分类系统暂时无法判别,请重新尝试拍摄物体的主要特征。\"},{\"name\":\"铜斑蛇\",\"trust\":19,\"lajitype\":4,\"lajitip\":\"铜斑蛇的垃圾分类系统暂时无法判别,请重新尝试拍摄物体的主要特征。\"},{\"name\":\"锹形虫\",\"trust\":0,\"lajitype\":4,\"lajitip\":\"锹形虫的垃圾分类系统暂时无法判别,请重新尝试拍摄物体的主要特征。\"}]}'\n get_res = json.loads(res)\n result = {}\n if int(get_res['code']) == 200:\n item_list = list(get_res['newslist'])\n i = 0\n ID = -1\n for item in item_list:\n if int(item['lajitype']) != 4:\n if i >= 1:\n break\n ClassID = int(item['lajitype'])\n # recyclable waste\n if ClassID == 0:\n ID = 1\n # hazardous waste\n elif ClassID == 1:\n ID = 3\n # kitchen waste\n elif ClassID == 2:\n ID = 4\n # other waste\n elif ClassID == 3:\n ID = 2\n temp = {\n 'ClassID': ID,\n 'Name': item['name']\n }\n result[str(i)] = temp\n i += 1\n print(res)\n return result\n else:\n print(res)\n return {'Name': 'ERROR', 'ClassID': -1}\n","sub_path":"AIR/AIR.py","file_name":"AIR.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"523100949","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef drawLineChart(title,xLabel, yLabel, lineDatas):\n 
plt.rcParams['font.sans-serif'] = ['SimHei']\n\n for lineData in lineDatas:\n plt.plot(lineData[0], lineData[1], label=lineData[2])\n\n plt.xlabel(xLabel)\n\n plt.ylabel(yLabel)\n\n plt.title(title)\n\n plt.legend()\n\n plt.show()\n","sub_path":"util/ChartUtil.py","file_name":"ChartUtil.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"450933920","text":"import hashlib\n\narchive1 = 'a.txt'\narchive2 = 'b.txt'\n\nhash1 = hashlib.new('ripemd160')\nhash1.update(open(archive1, 'rb').read())\n\nhash2 = hashlib.new('ripemd160')\nhash2.update(open(archive2, 'rb').read())\n\nif hash1.digest() != hash2.digest():\n print(f'File {archive1} differs from file {archive2}')\n print(f'The hash of file {archive1} is: ', hash1.hexdigest())\n print(f'The hash of file {archive2} is: ', hash2.hexdigest())\nelse:\n print(f'File {archive1} is identical to file {archive2}')\n print(f'The hash of file {archive1} is: ', hash1.hexdigest())\n print(f'The hash of file {archive2} is: ', hash2.hexdigest())\n","sub_path":"3 - Projeto Gerador de Senhas/hash-comparator.py","file_name":"hash-comparator.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"17994099","text":"import requests\nimport json\n\nendpoint = 'http://challenge.code2040.org/api/register'\ndictionary = {'email': 'jsanch14@nyit.edu', 'github': 'https://github.com/JXSan/CODE2040'}\n\ndef sendPost(endpoint, dictionary): \n #post information to endpoint\n r = requests.post(endpoint, data = json.dumps(dictionary))\n return r.json()['result']\n\n#retrieve token\nsendPost(endpoint, dictionary)\n\n#token info\ntoken = 'qnqASfdeyH'\ntokenDictionary = {'token' : 'qnqASfdeyH'}\n","sub_path":"sendPostRequest.py","file_name":"sendPostRequest.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"467291659","text":"from django.conf.urls import include, url, patterns\nfrom django.contrib.auth import views as auth_views\nfrom account.forms import CustomAuthenticationForm\n\nfrom website import views\n\nurlpatterns = patterns('',\n url(regex=r'^$',\n view = views.ProductListView.as_view(),\n name = 'index'\n ),\n url(regex=r'^show/(?P<pk>\\d+)/$',\n view = views.ProductDetailView.as_view(),\n name = 'product',\n ),\n url(regex=r'^cadastrar/$',\n view = views.SignUpCreateView.as_view(),\n name = 'signup',\n ),\n url(regex=r'^login/$',\n view = auth_views.login,\n name = 'login',\n kwargs = {'authentication_form': CustomAuthenticationForm},\n ),\n url(regex=r'^logout/$',\n view = auth_views.logout,\n name = 'logout',\n kwargs = {'next_page': '/'},\n ),\n url(regex=r'^system/dashboard/$',\n view = views.DashboardView.as_view(),\n name = 'dashboard',\n ),\n)\n\n\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"149078805","text":"import click\nfrom PIL import Image\nimport os\n\n\n@click.command()\n@click.option('--path',default='.')\n\ndef main(path):\n genres = [genre.name for genre in os.scandir(path) if genre.name[0] != '.']\n genres.remove('images-resized')\n for genre in genres:\n images = [os.path.join(path,genre,image.name) for image in os.scandir(os.path.join(path,genre)) if image.name[0] != '.']\n try:\n 
os.makedirs(os.path.join(path,'images-resized',genre))\n except FileExistsError:\n pass\n\n save_path = os.path.join(path,'images-resized',genre,genre)\n ext = '.png'\n for index, image in enumerate(images):\n img = Image.open(image)\n img.convert('RGB')\n width, height = img.size\n factor = 0.8\n new_width = int(width * factor)\n new_height = int(height * factor)\n\n new_img = img.resize((256,256), Image.ANTIALIAS)\n new_img.save(save_path + str(index+1) + ext,quality=96)\n print('Image {}/{} resized'.format(index+1,len(images)))\n\nif __name__ == '__main__':\n main()","sub_path":"generate/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"207896933","text":"#!/usr/bin/env python\n\n\"\"\"\nPython source code - replace this with a description of the code and write the code below this text.\n\"\"\"\n\nimport tempfile\nimport os\nimport os.path as osp\nimport sys\nimport subprocess\n\nclass Terp:\n\n def __init__(self):\n self.this_dir = osp.dirname(osp.abspath(__file__))\n self.terp_src_dir = osp.join(self.this_dir, 'terp-src')\n self.terpa_path = osp.join(self.terp_src_dir, 'bin', 'terpa')\n\n def compute_score(self, gts, res):\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n scores = []\n\n hyp = []\n refs = []\n\n\n this_sys = 'py'\n def gen_line(imgid, sent):\n seg_id = 1\n test_id = 1\n return '%s ([%s][%d][%s][%d])' % (sent, this_sys, test_id, imgid, seg_id)\n\n for i in imgIds:\n assert(len(res[i]) == 1)\n hyp.append(gen_line(i, res[i][0]))\n for ref in gts[i]:\n refs.append(gen_line(i, ref))\n\n hyp_file = tempfile.NamedTemporaryFile(delete=False, dir=self.this_dir)\n hyp_file.write('\\n'.join(hyp))\n hyp_file.close()\n\n refs_file = tempfile.NamedTemporaryFile(delete=False, dir=self.this_dir)\n refs_file.write('\\n'.join(refs))\n refs_file.close()\n\n output_prefix = tempfile.NamedTemporaryFile(delete=False, dir=self.this_dir)\n output_prefix.close()\n subprocess.call([self.terpa_path, '-h', hyp_file.name, '-r', refs_file.name, '-n', output_prefix.name, '-o', 'nist'], stdout = open(os.devnull))\n\n doc_src = output_prefix.name + this_sys + '.doc.scr'\n seg_src = output_prefix.name + this_sys + '.seg.scr'\n sys_src = output_prefix.name + this_sys + '.sys.scr'\n\n with open(sys_src) as reader:\n fields = reader.readline().split('\\t')\n score = float(fields[-2])\n\n with open(seg_src) as reader:\n scores_dict = {}\n for line in reader:\n fields = line.strip().split('\\t')\n scores_dict[fields[2]] = float(fields[-2])\n scores = []\n for i in imgIds:\n scores.append(scores_dict[i])\n\n for f in [hyp_file.name, refs_file.name, doc_src, sys_src, seg_src, output_prefix.name]:\n os.remove(f)\n\n return score, scores\n\n def method(self):\n return \"TERP\"\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n","sub_path":"pycocoevalcap/terp/terp.py","file_name":"terp.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"412181949","text":"\r\ndef obeb(sayi1, sayi2):\r\n for i in range(int(sayı1), 0, -1):\r\n if sayi1 % i == 0 and sayi2 % i == 0:\r\n break\r\n return i\r\n\r\n\r\nsayı1 = int(input(\"bir sayı giriniz:\"))\r\nsayı2 = int(input(\"bir sayhı giriniz:\"))\r\nprint(obeb(sayı1, sayı2))\r\n\r\ndef okek(sayi1,sayi2):\r\n\r\n okek=(sayi1*sayi2)/obeb(sayi1,sayi2)\r\n print(okek)\r\n\r\n return okek\r\n\r\n\r\nsayı1 = int(input(\"bir sayı 
giriniz:\"))\r\nsayı2 = int(input(\"bir sayhı giriniz:\"))\r\nokek(sayı1,sayı2)\r\n","sub_path":"4.ve 5.ödev ebob-ekok.py","file_name":"4.ve 5.ödev ebob-ekok.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"590105365","text":"# 整合模块\n# 发现在扩展的时候,因为这个只调用调度模块,导致第二个调度模块的检查器会将之前检查的代理ip再次检查一遍这样非常的费事,\n# 已经检查过一次了就不需要在重复检查了,所以这里有两个解决方案:\n# 1、重构scheduler模块,让他可以实现多个爬取器,然后一个检查器\n# 2、添加整合模块,将每一个调度器的爬取器,爬去下来的ip存放到不同的文件中,然后检查的时候也只是检查这个调度器下的文件中的ip,\n# 所有的调度器都结束工作之后,利用整合模块将所有文件中的ip整合到一个文件中\nimport sys\nimport os\nimport pandas as pd\nfrom scheduler import Scheduler\nimport extension\nfrom setting import csv_file_path\n\nclass Union(object):\n # 参数 file_list: 需要整合的文件路径列表\n # 参数 is_del_file : 是否需要删除中间文件,默认为不删除\n def __init__(self,file_list,is_del_file = False):\n # self.save = SaveIp()\n self.file_list = file_list\n self.perpare_work()\n self.is_del_file = is_del_file\n \n # 检查工作:检查传入的file_list中的文件是否都存在,\n # 将不存在的文件路径移除删除\n def perpare_work(self):\n self.file_list = list(set(self.file_list))\n for path in self.file_list:\n if not os.path.exists(path):\n self.file_list.remove(path)\n\n \n def run(self):\n # save = SaveIp(mode='a')\n df = pd.DataFrame(data=[],columns=[\"ip\",\"scores\"])\n for file_path in self.file_list:\n file_ips = self.read(file_path)\n if file_ips is not None:\n df = df.append(file_ips)\n\n # scores = [10 for _ in range(len(ips))]\n # df = pd.DataFrame({\"ip\":ips,\"scores\":scores})\n df.to_csv(csv_file_path,index=None,mode='a',columns=None,header=False) # 都保存到混沌代理池中\n print(\"文件整合成功\")\n if self.is_del_file:\n print(\"正在删除临时文件。。。\")\n self.delete_file()\n print(\"临时文件删除成功\")\n\n def delete_file(self):\n for file_path in self.file_list:\n print(f\"正在删除{file_path}\")\n os.remove(file_path)\n\n\n \n def read(self,file_path):\n try:\n dt = pd.read_csv(file_path)\n dt.columns=[\"ip\",\"scores\"]\n return dt\n except:\n return None\n\nif __name__ == \"__main__\":\n current_path = os.path.dirname(os.path.abspath(__file__))\n # f_path = current_path+\"\\\\89_ip.csv\"\n f_name = [\"\\\\qingting.csv\",'\\\\kuai.csv',\"\\\\89_ip.csv\",\"\\\\tomato.csv\"]\n f_path_list = [current_path+_ for _ in f_name]\n kuai_scheduler = Scheduler(ip_from=\"web\",base_url=extension.kuai_base_url,crawler_parse_fn=extension.kuai_parse,crawler_pages=200,save_m=\"a\",save_path=f_path_list[1],client_path=f_path_list[1],name=\"快代理调度器\")\n kuai_scheduler.start_scheduler()\n kuai_scheduler.shutdown()\n\n qingting_scheduler = Scheduler(ip_from=\"web\",base_url=extension.qingting_base_url,crawler_pages=4,crawler_parse_fn=extension.qingting_parse,save_path=f_path_list[0],save_m=\"a\",client_path=f_path_list[0],name=\"蜻蜓代理调度器\")\n qingting_scheduler.start_scheduler()\n qingting_scheduler.shutdown()\n\n _89_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path[1],client_path=f_path[1],name=\"89代理调度器\")\n _89_scheduler.start_scheduler()\n _89_scheduler.shutdown()\n\n tomato_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path[2],client_path=f_path[2],name=\"番茄代理调度器\")\n tomato_scheduler.start_scheduler()\n tomato_scheduler.shutdown()\n \n union = Union(f_path_list,True)\n 
{"seq_id":"505298957","text":"# -*- coding: utf-8 -*-\nimport json\nimport urllib\nimport time\nimport scrapy\nfrom scrapy.http.request import Request\nfrom train12306.items import AgencyItem\nfrom train12306.items import CommitItem\n\n\nclass AgencysSpider(scrapy.Spider):\n    name = 'AgentcysSpider'\n#    start_urls = ['https://kyfw.12306.cn/otn/userCommon/allProvince']\n\n    custom_settings = {\n        'ITEM_PIPELINES': {\n            'train12306.pipelines.AgencySQLPipeline': 300,\n        },\n#        'DUPEFILTER_DEBUG': True,\n        'DOWNLOADER_MIDDLEWARES': {\n            'train12306.middle.DownloaderMiddleware': 500,\n        },\n        # drop duplicate urls\n        'DUPEFILTER_CLASS': \"train12306.filter.URLTurnFilter\",\n        # resume support: after a network drop or a manual stop, the next run\n        # continues from the previous download progress\n        'JOBDIR': \"s/agencys\",\n    }\n\n    def __init__(self, *a, **kw):\n        super(AgencysSpider, self).__init__(self.name, **kw)\n        # self.turn = a[0]\n        # the turn parameter marks the crawl round, used to tell which run a record was downloaded in\n        turn = int(time.time() / 86400)\n        self.turn = turn\n        self.logger.info(\"%s. this turn %d\" % (self.name, self.turn))\n\n    def start_requests(self):\n        # this url returns every province that has ticket agencies\n        yield Request(\"https://kyfw.12306.cn/otn/userCommon/allProvince\", callback = self.parse, meta = {\"turn\":self.turn})\n\n    def parse(self, response):\n        url = \"https://kyfw.12306.cn/otn/queryAgencySellTicket/query?\"\n\n        j = json.loads(response.body)\n        # prov example: {\"chineseName\":\"安徽\",\"allPin\":\"\",\"simplePin\":\"ah\",\"stationTelecode\":\"34\"}\n        for prov in j[\"data\"]:\n\n            params = {\"province\":prov[\"chineseName\"].encode(\"utf-8\"), \"city\":\"\", \"county\":\"\"}\n            # s_url is the composed url whose json page lists every agency in that province\n            # s_url: https://kyfw.12306.cn/otn/queryAgencySellTicket/query?province=%E5%90%89%E6%9E%97&city=&county=\n            s_url = url + urllib.urlencode(params)\n            yield Request(s_url, callback = self.parse_agency, meta = {\"turn\":response.meta[\"turn\"]})\n\n    def parse_agency(self, response):\n        datas = json.loads(response.body)\n        for data in datas[\"data\"][\"datas\"]:\n            item = AgencyItem()\n            # province\n            item[\"province\"] = data[\"province\"]\n            # city\n            item[\"city\"] = data[\"city\"]\n            # district/county\n            item[\"county\"] = data[\"county\"]\n            # address\n            item[\"address\"] = data[\"address\"]\n            # agency name\n            item[\"name\"] = data[\"agency_name\"]\n            # number of ticket windows\n            item[\"windows\"] = data[\"windows_quantity\"]\n            # opening time\n            item[\"start\"] = data[\"start_time_am\"]\n            # closing time\n            item[\"end\"] = data[\"stop_time_pm\"]\n            # crawl round\n            item[\"turn\"] = response.meta[\"turn\"]\n            yield item\n        yield CommitItem()\n\n\n","sub_path":"train12306/spiders/agencys.py","file_name":"agencys.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"602705928","text":"import warnings\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom torch.nn.modules import BatchNorm2d\n\nfrom hynet.imgr.models.brew_module import BrewModel, BrewModuleList\n\ncfgs = {\n    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], # check 13\n    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], # check 16\n    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], # check 19\n}\n\nactivation = nn.ReLU\n\ndef make_layers(in_channels , cfg, batch_norm=False, bias=False):\n    layers = []\n    \n    for v in cfg:\n        if v == 'M':\n            
layers += [nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=bias)\n if batch_norm:\n layers += [conv2d, BatchNorm2d(v), activation()]\n else:\n layers += [conv2d, activation()]\n in_channels = v\n\n return layers\n\nclass EnDecoder(BrewModel):\n\n def __init__(self,\n in_channels,\n num_classes,\n batch_norm=False,\n bias=False,\n model_type='B3'):\n super(EnDecoder, self).__init__()\n\n self.cfg = cfgs[model_type]\n wh = 32\n for i in self.cfg:\n if i == 'M':\n wh = wh / 2\n self.img_size = [int(wh), int(wh)]\n \n self.out_channels = self.cfg[-2]\n\n self.encoder = BrewModuleList(\n make_layers(in_channels, self.cfg, batch_norm=batch_norm, bias=bias)\n )\n self.decoder = BrewModuleList([\n nn.Flatten(start_dim=1),\n nn.Linear(self.out_channels * self.img_size[0] * self.img_size[1], 4096, bias=bias),\n activation(),\n nn.Dropout(),\n nn.Linear(4096, 4096, bias=bias),\n activation(),\n nn.Dropout(),\n nn.Linear(4096, num_classes, bias=bias)\n ])\n\n self.focused_layer = self.encoder[0]\n # check network wrong classification case\n def all_zero_hook(self, input, result):\n if isinstance(result, tuple):\n res = result[0]\n else:\n res = result\n aggregate = res.abs().flatten(start_dim=1).sum(-1)\n flag = (aggregate > 0).float().mean()\n if flag != 1.0:\n warnings.warn(\"{} layer has all zero value : {}\".format(self, flag))\n for m in self.encoder.named_modules():\n m[1].register_forward_hook(all_zero_hook)\n for m in self.decoder.named_modules():\n m[1].register_forward_hook(all_zero_hook)\n\n # intislaization whole network module\n self._initialization(self.encoder)\n self._initialization(self.decoder)\n \n def _initialization(self, mlist):\n for idx, m in enumerate(mlist):\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)","sub_path":"hynet/imgr/models/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"116982399","text":"###############################################################################\n#\n# Copyright (c) 2017-2020 Master AI, Inc.\n# ALL RIGHTS RESERVED\n#\n# Use of this library, in source or binary form, is prohibited without written\n# approval from Master AI, Inc.\n#\n###############################################################################\n\nimport asyncio\nfrom pprint import pprint\n\nimport cio.aa_controller_v3 as controller\n\n\ndef fmt(vals):\n print(''.join(f'{v:10.2f}' for v in vals))\n\n\nasync def run():\n c = controller.CioRoot()\n caps = await c.init()\n pprint(caps)\n\n gyro = await c.acquire('Gyroscope')\n gyro_accum = await c.acquire('Gyroscope_accum')\n\n for i in range(500):\n fmt(await gyro.read())\n fmt(await gyro_accum.read())\n print()\n if i % 50 == 0:\n await gyro_accum.reset()\n await asyncio.sleep(0.1)\n\n await c.release(gyro)\n await c.release(gyro_accum)\n\n await c.close()\n\n\nif __name__ == '__main__':\n 
asyncio.run(run())\n\n","sub_path":"cio/aa_controller_v3/examples/gyro.py","file_name":"gyro.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"653321062","text":"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Concept types supported by Concept PPO.\"\"\"\n\nimport enum\n\n\nclass ConceptType(enum.IntEnum):\n \"\"\"Agents which have default initializers supported below.\"\"\"\n BINARY = 0\n SCALAR = 1\n CATEGORICAL = 2\n POSITION = 3\n\n\nclass ObjectType(enum.IntEnum):\n \"\"\"Agents which have default initializers supported below.\"\"\"\n AGENT = 0\n ENVIRONMENT_OBJECT = 1\n","sub_path":"concept_marl/utils/concept_types.py","file_name":"concept_types.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"144325495","text":"# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Transform a set of Haiku-using functions which use overlapping params.\"\"\"\n\n# pylint: disable=unnecessary-lambda\n\nimport types\nfrom typing import Any, Callable, NamedTuple, Tuple\n\nimport dataclasses\nfrom haiku._src import analytics\nfrom haiku._src import transform\nfrom haiku._src import typing\nimport jax\n\n# If you are forking replace this with `import haiku as hk`.\nhk = types.ModuleType('haiku')\nhk.transform_with_state = transform.transform_with_state\nhk.Params = typing.Params\nhk.State = typing.State\ndel transform, typing\n\nPyTreeDef = Any\nTemplateFn = Callable[..., Any]\nTreeOfApplyFns = Any\n\n\nclass MultiTransformed(NamedTuple):\n \"\"\"Holds a collection of pure functions.\n\n Attributes:\n init: A pure function: ``params = init(rng, *a, **k)``\n apply: A JAX tree of pure functions each with the signature:\n ``out = apply(params, rng, *a, **k)``.\n\n See also:\n :class:`Transformed`: Single apply variant of multi-transform.\n :class:`MultiTransformedWithState`: Multi apply with state variant.\n \"\"\"\n\n # Args: [Optional[PRNGKey], ...]\n init: Callable[..., hk.Params]\n\n # PyTree[Callable[[hk.Params, Optional[PRNGKey], ...], Any]]\n apply: Any\n\n\nclass MultiTransformedWithState(NamedTuple):\n \"\"\"Holds a collection of pure functions.\n\n Attributes:\n init: A pure function: 
``params, state = init(rng, *a, **k)``\n apply: A JAX tree of pure functions each with the signature:\n ``out, state = apply(params, state, rng, *a, **k)``.\n\n See also:\n :class:`TransformedWithState`: Single apply variant of multi-transform.\n :class:`MultiTransformed`: Multi apply with state variant.\n \"\"\"\n\n # Args: [Optional[PRNGKey], ...]\n init: Callable[..., Tuple[hk.Params, hk.State]]\n\n # PyTree[Callable[[hk.Params, hk.State, Optional[PRNGKey], ...],\n # Tuple[Any, hk.State]]]\n apply: Any\n\n\n@dataclasses.dataclass\nclass Box:\n \"\"\"Holds a Python value and has no leaves.\"\"\"\n python_value: Any\n\njax.tree_util.register_pytree_node(\n Box, lambda b: ([], b.python_value), lambda v, _: Box(v))\n\n\ndef multi_transform_with_state(\n f: Callable[[], Tuple[TemplateFn, TreeOfApplyFns]],\n) -> MultiTransformedWithState:\n \"\"\"Transforms a collection of functions using Haiku into pure functions.\n\n See :func:`multi_transform` for more details.\n\n Example:\n\n >>> def f():\n ... encoder = hk.Linear(1, name=\"encoder\")\n ... decoder = hk.Linear(1, name=\"decoder\")\n ...\n ... def init(x):\n ... z = encoder(x)\n ... return decoder(z)\n ...\n ... return init, (encoder, decoder)\n\n >>> f = hk.multi_transform_with_state(f)\n >>> rng = jax.random.PRNGKey(42)\n >>> x = jnp.ones([1, 1])\n >>> params, state = f.init(rng, x)\n >>> jax.tree_map(jnp.shape, params)\n {'decoder': {'b': (1,), 'w': (1, 1)},\n 'encoder': {'b': (1,), 'w': (1, 1)}}\n\n >>> encode, decode = f.apply\n >>> z, state = encode(params, state, None, x)\n >>> y, state = decode(params, state, None, z)\n\n Args:\n f: Function returning a \"template\" function and an arbitrary\n tree of functions using modules connected in the template function.\n\n Returns:\n An ``init`` function and a tree of pure ``apply`` functions.\n\n See also:\n :func:`transform_with_state`: Transform a single apply function.\n :func:`multi_transform`: Transform multiple apply functions without state.\n \"\"\"\n analytics.log_once('multi_transform_with_state')\n\n def init_fn(*args, **kwargs):\n \"\"\"Returns initial state for the transformed functions.\"\"\"\n return f()[0](*args, **kwargs)\n\n init_fn = hk.transform_with_state(init_fn).init\n\n def apply_fn_i(i):\n def apply_fn(*args, **kwargs):\n \"\"\"Applies the transformed function at the given inputs.\"\"\"\n return jax.tree_leaves(f()[1])[i](*args, **kwargs)\n return apply_fn\n\n # We need to find out the structure of f()[1], including how many\n # functions there are, so that we can transform them individually and repack\n # into the same tree structure. It's valid for modules to declare parameters\n # in their constructor, so we need to create something that looks like\n # hk.Params in order to do this. `jax.eval_shape` interprets the function\n # abstractly, ie no real params are created, and we don't need to touch the\n # accelerator. 
This means hardcoding the RNG below is fine.\n def get_output_treedef() -> Box:\n rng = jax.random.PRNGKey(42) # This is fine, see above\n fns = hk.transform_with_state(lambda: f()[1])\n apply_fns, _ = fns.apply(*fns.init(rng), rng)\n return Box(jax.tree_structure(apply_fns))\n\n output_treedef = jax.eval_shape(get_output_treedef).python_value\n apply_fns = make_tree(lambda i: hk.transform_with_state(apply_fn_i(i)).apply,\n output_treedef)\n\n return MultiTransformedWithState(init_fn, apply_fns)\n\n\ndef multi_transform(\n f: Callable[[], Tuple[TemplateFn, TreeOfApplyFns]],\n) -> MultiTransformed:\n \"\"\"Transforms a collection of functions using Haiku into pure functions.\n\n In many scenarios we have several modules which are used either as primitives\n for several Haiku modules/functions, or whose pure versions are to be reused\n in downstream code. This utility enables this by applying\n :func:`transform` to an arbitrary tree of Haiku functions which share modules\n and have a common ``init`` function.\n\n ``f`` is expected to return a tuple of two elements. First is a ``template``\n Haiku function which provides an example of how all internal Haiku modules are\n connected. This function is used to create a common ``init`` function (with\n your parameters).\n\n The second object is an arbitrary tree of Haiku functions all of which reuse\n the modules connected in the ``template`` function. These functions are\n transformed to pure ``apply`` functions.\n\n Example:\n\n >>> def f():\n ... encoder = hk.Linear(1, name=\"encoder\")\n ... decoder = hk.Linear(1, name=\"decoder\")\n ...\n ... def init(x):\n ... z = encoder(x)\n ... return decoder(z)\n ...\n ... return init, (encoder, decoder)\n\n >>> f = hk.multi_transform(f)\n >>> rng = jax.random.PRNGKey(42)\n >>> x = jnp.ones([1, 1])\n >>> params = f.init(rng, x)\n >>> jax.tree_map(jnp.shape, params)\n {'decoder': {'b': (1,), 'w': (1, 1)},\n 'encoder': {'b': (1,), 'w': (1, 1)}}\n\n >>> encode, decode = f.apply\n >>> z = encode(params, None, x)\n >>> y = decode(params, None, z)\n\n Args:\n f: A factory function that returns two functions, firstly a common init\n function that creates all modules, and secondly a pytree of apply\n functions which make use of those modules.\n\n Returns:\n A :class:`MultiTransformed` instance which contains a pure init function\n that creates all parameters, and a pytree of pure apply functions that\n given the params apply the given function.\n\n See also:\n :func:`multi_transform_with_state`: Equivalent for modules using state.\n \"\"\"\n analytics.log_once('multi_transform')\n\n f = multi_transform_with_state(f)\n f = without_state(f)\n return f\n\n\ndef without_state(f: MultiTransformedWithState) -> MultiTransformed:\n \"\"\"Converts ``MultiTransformedWithState`` to ``MultiTransformed``.\"\"\"\n def init_fn(rng, *args, **kwargs) -> hk.Params:\n params, state = f.init(rng, *args, **kwargs)\n if state:\n raise ValueError(\n 'If your transformed function uses `hk.{get,set}_state` then use '\n '`hk.multi_transform_with_state`.')\n return params\n\n def apply_without_state(orig_apply_fn) -> Any:\n def apply_fn(params: hk.Params, rng, *args, **kwargs):\n out, state = orig_apply_fn(params, {}, rng, *args, **kwargs)\n if state:\n raise ValueError(\n 'If your transformed function uses `hk.{get,set}_state` then use '\n '`hk.multi_transform_with_state`.')\n return out\n return apply_fn\n\n apply_fns = jax.tree_map(apply_without_state, f.apply)\n\n return MultiTransformed(init_fn, apply_fns)\n\n\ndef make_tree(f: 
Callable[[int], Any], treedef: PyTreeDef):\n leaves = list(map(f, range(treedef.num_leaves)))\n return jax.tree_unflatten(treedef, leaves)\n","sub_path":"haiku/_src/multi_transform.py","file_name":"multi_transform.py","file_ext":"py","file_size_in_byte":8696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"20998550","text":"import requests\nimport json\nimport time\nfrom collections import deque\n\n\nclass Graph:\n def __init__(self):\n self.rooms = dict()\n self.visited = set()\n\n def add(self, room):\n if room.id not in self.rooms:\n self.rooms[room.id] = room\n else:\n return None\n\n def visit(self, room):\n self.visited.add(room.id)\n\n def has_been_visited(self, room):\n return room.id in self.visited\n\n def save(self):\n out_dict = dict()\n for key, room in self.rooms.items():\n out_dict[key] = eval(str(room))\n\n with open('output.json', 'w') as please_work:\n please_work.write(json.dumps(out_dict))\n\n def total_unexplored_rooms(self):\n unexplored = list()\n for key, room in self.rooms.items():\n exits = [room.get_room_in_direction(d) for d in room.get_exits()]\n if '?' in exits:\n unexplored.append(key)\n\n return len(unexplored)\n\n\nclass Room:\n def __init__(self, id, title, description, x, y):\n self.id = id\n self.title = title\n self.description = description\n self.x = x\n self.y = y\n self.n_to = None\n self.s_to = None\n self.e_to = None\n self.w_to = None\n\n def __str__(self):\n exits = dict()\n for x in self.get_exits():\n exits[x] = \"?\" if type(self.get_room_in_direction(\n x)) == str else self.get_room_in_direction(x).id\n\n return f'[{(self.x, self.y)}, {exits}]'\n\n def __repr__(self):\n exits = dict()\n for x in self.get_exits():\n exits[x] = '?' if type(self.get_room_in_direction(\n x)) == str else self.get_room_in_direction(x).id\n\n return f\"[{(self.x, self.y)}, {exits}]\"\n\n def get_exits(self):\n exits = []\n if self.n_to is not None:\n exits.append(\"n\")\n if self.s_to is not None:\n exits.append(\"s\")\n if self.w_to is not None:\n exits.append(\"w\")\n if self.e_to is not None:\n exits.append(\"e\")\n return exits\n\n def get_exits_string(self):\n return f\"Exits: [{', '.join(self.get_exits())}]\"\n\n def connect_rooms(self, direction, connecting_rooms):\n if direction == \"n\":\n self.n_to = connecting_rooms\n connecting_rooms.s_to = self\n elif direction == \"s\":\n self.s_to = connecting_rooms\n connecting_rooms.n_to = self\n elif direction == \"e\":\n self.e_to = connecting_rooms\n connecting_rooms.w_to = self\n elif direction == \"w\":\n self.w_to = connecting_rooms\n connecting_rooms.e_to = self\n else:\n print(\"INVALID ROOM CONNECTION\")\n return None\n\n def get_room_in_direction(self, direction):\n if direction == \"n\":\n return self.n_to\n elif direction == \"s\":\n return self.s_to\n elif direction == \"e\":\n return self.e_to\n elif direction == \"w\":\n return self.w_to\n else:\n return None\n\n def getCoords(self):\n return [self.x, self.y]\n\n\nclass Player:\n def __init__(self, name, token, starting_room):\n self.name = name\n self.token = token\n self.current_room = starting_room\n\n def travel(self, id):\n self.current_room = graph.rooms[id]\n\n\ndef bft():\n visited = set()\n queue = deque()\n queue.append([{player.current_room.id: None}])\n\n while len(queue) > 0:\n path = queue.popleft()\n vert = list(path[-1])[0]\n\n if vert not in visited:\n for new_exit in graph.rooms[vert].get_exits():\n new_path = list(path)\n\n if graph.rooms[vert].get_room_in_direction(new_exit) 
!= '?':\n output = {graph.rooms[vert].get_room_in_direction(new_exit).id: new_exit\n }\n else:\n output = {'?': new_exit}\n\n new_path.append(output)\n queue.append(new_path)\n\n if graph.rooms[vert].get_room_in_direction(new_exit) == '?':\n return [\n list(directions.values())[0] for directions in new_path[1:]\n ]\n visited.add(vert)\n\n\nBASE_URL = 'https://lambda-treasure-hunt.herokuapp.com/api/adv'\nheaders = {\n 'Authorization': 'Token 735f59e052bf0edf98fa68005e65388a08f729b6'\n}\nprint('first pay', headers)\nres = requests.get(f\"{BASE_URL}/init\", headers=headers).json()\nprint('first res', res)\nid = res.get('room_id')\ntitle = res.get('title')\ncooldown = res.get('cooldown')\ndescription = res.get('description')\ncoords = res.get('coordinates')\nprint(coords, title, 'First Print')\nx, y = eval(coords)\nexits = res.get('exits')\n\n# Create room and mark exits with questionmark\nfirst_room = Room(id, title, description, x, y)\nfirst_room.n_to = \"?\" if \"n\" in exits else None\nfirst_room.s_to = \"?\" if \"s\" in exits else None\nfirst_room.e_to = \"?\" if \"e\" in exits else None\nfirst_room.w_to = \"?\" if \"w\" in exits else None\n\ngraph = Graph()\nplayer = Player('Leigh-Ann',\n '735f59e052bf0edf98fa68005e65388a08f729b6',\n first_room)\n\ngraph.add(first_room)\ntime.sleep(cooldown)\n\nprev_direction = None\nnew_path = bft()\n\nwhile graph.total_unexplored_rooms() > 0:\n for index, direction in enumerate(new_path):\n next_room = player.current_room.get_room_in_direction(\n direction) if player.current_room.get_room_in_direction(direction) != '?' else None\n\n if next_room is not None:\n payload = {\n 'direction': direction,\n 'next_room_id': str(next_room.id)\n }\n else:\n payload = {\n 'direction': direction\n }\n print('second pay', payload, headers)\n res = requests.post(f\"{BASE_URL}/move/\",\n headers=headers, json=payload).json()\n print('Second res', res)\n id = res.get('room_id')\n title = res.get('title')\n cooldown = res.get('cooldown')\n description = res.get('description')\n coords = res.get('coordinates')\n print(coords, title, 'Second Print')\n x, y = eval(str(coords))\n exits = res.get('exits')\n\n if id not in graph.rooms:\n new_room = Room(id, title, description, x, y)\n new_room.n_to = \"?\" if \"n\" in exits else None\n new_room.s_to = \"?\" if \"s\" in exits else None\n new_room.e_to = \"?\" if \"e\" in exits else None\n new_room.w_to = \"?\" if \"w\" in exits else None\n graph.add(new_room)\n else:\n new_room = graph.rooms[id]\n\n if player.current_room.get_room_in_direction(direction) == '?':\n player.current_room.connect_rooms(direction, new_room)\n\n player.travel(id)\n prev_direction = direction\n graph.save()\n time.sleep(cooldown)\n new_path = bft()\n print(new_path)\n print(new_room)\n","sub_path":"scripts/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"345243752","text":"import webbrowser\n\n\"\"\"this class constructs a Movie object, and contains 6 attributes: title, storyline,\nposter image URL, youtube trailer URL, release date and cast list. \"\"\"\nclass Movie():\n def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube, released_date, cast_list):\n \"\"\"this is the constructor method which takes the 6 attributes as inputs\n and turns them into properties of the object created. 
\"\"\"\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.release_date = released_date\n self.cast = cast_list\n","sub_path":"Movie Project/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"188473210","text":"#coding=utf-8\r\n\r\nreLogHeader = re.compile(r'(^\\d{4}-\\d{2}\\-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3})\\t')\r\nclass CRegexMergeLogFile():\r\n def __init__(self):\r\n self.strLogHeader = ''\r\n self.strLog = ''\r\n self.strNextLogHeader = ''\r\n self.strNextLog = ''\r\n \r\n def GetCurTime(self):\r\n '''当前Log时间'''\r\n if self.strLogHeader != '':\r\n return self.strLogHeader\r\n self.GetALog() \r\n return self.strLogHeader\r\n \r\n def GetALog(self):\r\n strLogHeader = ''\r\n strLog = ''\r\n strNextLogHeader = ''\r\n strNextLog = ''\r\n while True:\r\n line = self.f.readline()\r\n if not line:\r\n break #一个文件处理结束\r\n \r\n line = line.replace('\\r\\n', '\\r')\r\n line = line.replace('\\n', '\\r')\r\n m = reLogHeader.match(line)\r\n if m:\r\n if strLogHeader=='': #一个log的开始\r\n strLogHeader = m.group(0)\r\n strLog = line\r\n else: #到达下一个log了\r\n strNextLogHeader = m.group(0)\r\n strNextLog = line\r\n break\r\n elif strLogHeader!='':\r\n strLog += line\r\n else:\r\n ErrPrint('不支持的log格式')\r\n return False\r\n \r\n self.strLogHeader = strLogHeader\r\n self.strLog = strLog\r\n self.strNextLog = strNextLog\r\n self.strNextLogHeader = strNextLogHeader\r\n return strLog!=''\r\n \r\n def GetLogs(self, lstLogs, strHeaderBegin, strHeaderEnd):\r\n '''从当前文件位置读取指定区间的log'''\r\n if (self.strLog>=strHeaderBegin) & (self.strLog<=strHeaderEnd):\r\n lstLogs.append(self.strLog)\r\n if (self.strNextLog>=strHeaderBegin) & (self.strNextLog<=strHeaderEnd):\r\n lstLogs.append(self.strNextLog)\r\n \r\n strLogHeader = ''\r\n strLog = ''\r\n while True:\r\n line = self.f.readline()\r\n if not line:\r\n if strLog != '': \r\n lstLogs.append(strLog)\r\n break #一个文件处理结束\r\n \r\n line = line.replace('\\r\\n', '\\r')\r\n line = line.replace('\\n', '\\r')\r\n if line =='\\r': #过滤空行\r\n continue\r\n \r\n m = reLogHeader.match(line)\r\n if m:\r\n strMatchHeader = m.group(0)\r\n if (strMatchHeaderstrHeaderEnd): \r\n if strLog != '': \r\n lstLogs.append(strLog)\r\n break\r\n if strLogHeader=='': #一个log的开始\r\n strLogHeader = strMatchHeader\r\n strLog = line\r\n else: #到达下一个log了\r\n lstLogs.append(strLog)\r\n strLogHeader = strMatchHeader\r\n strLog = line\r\n elif strLogHeader!='':\r\n strLog += line\r\n\r\n return lstLogs\r\n","sub_path":"python/demo/xPyQTDemo/LogDlgThread.py","file_name":"LogDlgThread.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"571525317","text":"#ex1-1\nfrom numpy import *\n\nfor i in range(1, 10):\n for j in range(1, 10):\n print(\"%d*%d=%2d\" % (i, j, i * j), end=\" \")\n print(\"\")\n\n#ex1-2\ndef swap(x, y):\n a = x\n x = y\n y = a\n print(x, y)\n\nk = swap(2020, 5050)\nprint(k)\n\n#ex1-3\ndef judge_big_lower(x, y):\n a = x\n b = y\n if a > b:\n print(\"a>b\")\n elif a < b:\n print(\"a90:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 7090:\n bonus = 8000\nif 70= 90:\n x = 8000 + overtime*200\n if 70 str:\n lookup = ('utf-8', 
{"seq_id":"542217540","text":"import json\nimport pathlib\nimport re\nimport requests.exceptions\nimport time\n\nimport kubernetes\nimport pytest\nfrom pytest_bdd import given, parsers, then, when\nimport yaml\n\nfrom tests import kube_utils\nfrom tests import utils\n\n\n# Pytest command-line options\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--iso-root\",\n        action=\"store\",\n        default=\"_build/root\",\n        type=pathlib.Path,\n        help=\"Root of the ISO file tree.\",\n    )\n\n\n# Fixtures {{{\n\n\n@pytest.fixture(scope=\"module\")\ndef version(request, host):\n    iso_root = request.config.getoption(\"--iso-root\")\n    product_path = iso_root / \"product.txt\"\n    with host.sudo():\n        return host.check_output(\"source %s && 
echo $VERSION\", str(product_path))\n\n\n@pytest.fixture(scope=\"module\")\ndef hostname(host):\n \"\"\"Return the result of `hostname` on the `host` fixture.\n\n The hostname registered in the SSH config file may not match the one known\n by the server, especially if running tests locally.\n \"\"\"\n return host.check_output(\"hostname\")\n\n\n@pytest.fixture(scope=\"module\")\ndef nodename(host):\n \"\"\"Return the kubernetes node name of the `host`\n\n Node name need to be equal to the salt minion id so just retrieve the\n salt minion id\n \"\"\"\n return utils.get_grain(host, \"id\")\n\n\n@pytest.fixture(scope=\"module\")\ndef control_plane_ip(host):\n \"\"\"Return the Kubernetes control plane IP based on the salt grain\"\"\"\n return utils.get_grain(host, \"metalk8s:control_plane_ip\")\n\n\n@pytest.fixture(scope=\"module\")\ndef kubeconfig_data(request, host):\n \"\"\"Fixture to generate a kubeconfig file for remote usage.\"\"\"\n with host.sudo():\n kubeconfig_file = host.file(\"/etc/kubernetes/admin.conf\")\n if not kubeconfig_file.exists:\n pytest.skip(\n \"Must be run on bootstrap node, or have an existing file at \"\n \"/etc/kubernetes/admin.conf\"\n )\n return yaml.safe_load(kubeconfig_file.content_string)\n\n\n@pytest.fixture\ndef kubeconfig(kubeconfig_data, tmp_path):\n kubeconfig_path = tmp_path / \"admin.conf\"\n kubeconfig_path.write_text(yaml.dump(kubeconfig_data), encoding=\"utf-8\")\n return str(kubeconfig_path) # Need Python 3.6 to open() a Path object\n\n\n@pytest.fixture\ndef control_plane_ingress_ip(k8s_client):\n \"\"\"Return the Control Plane Ingress IP from Kubernetes service\"\"\"\n ingress_svc = k8s_client.read_namespaced_service(\n name=\"ingress-nginx-control-plane-controller\",\n namespace=\"metalk8s-ingress\",\n )\n return ingress_svc.spec.load_balancer_ip or ingress_svc.spec.external_i_ps[0]\n\n\n@pytest.fixture\ndef control_plane_ingress_ep(k8s_client, control_plane_ingress_ip):\n \"\"\"Return the Control Plane Ingress Endpoint from Kubernetes service\"\"\"\n ingress_svc = k8s_client.read_namespaced_service(\n name=\"ingress-nginx-control-plane-controller\",\n namespace=\"metalk8s-ingress\",\n )\n ingress_port = ingress_svc.spec.ports[0].port\n\n return \"https://{}:{}\".format(control_plane_ingress_ip, ingress_port)\n\n\n@pytest.fixture\ndef k8s_apiclient(kubeconfig):\n \"\"\"Return an ApiClient to use for interacting with all K8s APIs.\"\"\"\n return kubernetes.config.new_client_from_config(\n config_file=kubeconfig, persist_config=False\n )\n\n\n@pytest.fixture\ndef k8s_client(request, k8s_apiclient):\n \"\"\"Parametrized fixture to instantiate a client for a single K8s API.\n\n By default, this will return a CoreV1Api client.\n One can decorate a test function to use another API, like so:\n\n ```\n @pytest.mark.parametrize(\n 'k8s_client', ['AppsV1Api'], indirect=True\n )\n def test_something(k8s_client):\n assert k8s_client.list_namespaced_deployment(namespace=\"default\")\n ```\n\n FIXME: this is not working as of right now, since `pytest-bdd` manipulates\n fixtures in its own way through the various scenario/when/then/given\n decorators.\n \"\"\"\n api_name = getattr(request, \"param\", \"CoreV1Api\")\n api_cls = getattr(kubernetes.client, api_name, None)\n\n if api_cls is None:\n pytest.fail(\n \"Unknown K8s API '{}' to use with `k8s_client` fixture.\".format(api_name)\n )\n\n return api_cls(api_client=k8s_apiclient)\n\n\n@pytest.fixture\ndef admin_sa(k8s_client, k8s_apiclient):\n \"\"\"Fixture to create a ServiceAccount which is bind to 
`cluster-admin`\n ClusterRole and return the ServiceAccount name\n \"\"\"\n rbac_k8s_client = kubernetes.client.RbacAuthorizationV1Api(api_client=k8s_apiclient)\n sa_name = \"test-admin\"\n sa_namespace = \"default\"\n sa_manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"ServiceAccount\",\n \"metadata\": {\"name\": sa_name, \"namespace\": sa_namespace},\n }\n crb_manifest = {\n \"apiVersion\": \"rbac.authorization.k8s.io/v1\",\n \"kind\": \"ClusterRoleBinding\",\n \"metadata\": {\"name\": sa_name},\n \"roleRef\": {\n \"apiGroup\": \"rbac.authorization.k8s.io\",\n \"kind\": \"ClusterRole\",\n \"name\": \"cluster-admin\",\n },\n \"subjects\": [\n {\"kind\": \"ServiceAccount\", \"name\": sa_name, \"namespace\": sa_namespace}\n ],\n }\n\n k8s_client.create_namespaced_service_account(\n body=sa_manifest, namespace=sa_namespace\n )\n rbac_k8s_client.create_cluster_role_binding(body=crb_manifest)\n\n def _check_crb_exists():\n try:\n rbac_k8s_client.read_cluster_role_binding(name=sa_name)\n except kubernetes.client.rest.ApiException as err:\n if err.status == 404:\n raise AssertionError(\"ClusterRoleBinding not yet created\")\n raise\n\n def _check_sa_exists():\n try:\n sa_obj = k8s_client.read_namespaced_service_account(\n name=sa_name, namespace=sa_namespace\n )\n except kubernetes.client.rest.ApiException as err:\n if err.status == 404:\n raise AssertionError(\"ServiceAccount not yet created\")\n raise\n\n assert sa_obj.secrets\n assert sa_obj.secrets[0].name\n\n try:\n secret_obj = k8s_client.read_namespaced_secret(\n sa_obj.secrets[0].name, sa_namespace\n )\n except kubernetes.client.rest.ApiException as err:\n if err.status == 404:\n raise AssertionError(\"Secret not yet created\")\n raise\n\n assert secret_obj.data.get(\"token\")\n\n # Wait for ClusterRoleBinding to exists\n utils.retry(_check_crb_exists, times=20, wait=3)\n\n # Wait for ServiceAccount to exists\n utils.retry(_check_sa_exists, times=20, wait=3)\n\n yield (sa_name, sa_namespace)\n\n try:\n rbac_k8s_client.delete_cluster_role_binding(\n name=sa_name,\n body=kubernetes.client.V1DeleteOptions(propagation_policy=\"Foreground\"),\n )\n except kubernetes.client.rest.ApiException:\n pass\n\n k8s_client.delete_namespaced_service_account(\n name=sa_name,\n namespace=sa_namespace,\n body=kubernetes.client.V1DeleteOptions(propagation_policy=\"Foreground\"),\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef bootstrap_config(host):\n with host.sudo():\n config_file = host.file(\"/etc/metalk8s/bootstrap.yaml\")\n if not config_file.exists:\n pytest.skip(\"Must be run on bootstrap node\")\n return yaml.safe_load(config_file.content_string)\n\n\n@pytest.fixture\ndef registry_address(host, version):\n with host.sudo():\n registry_json = host.check_output(\n \"salt-call --out json slsutil.renderer string='\"\n '{% from \"metalk8s/map.jinja\" import repo with context %}'\n \"{{ repo.registry_endpoint }}' \"\n \"saltenv='metalk8s-\" + str(version) + \"'\"\n )\n return json.loads(registry_json)[\"local\"]\n\n\n@pytest.fixture\ndef utils_image(registry_address, version):\n return \"{registry_address}/{repository}/{image}\".format(\n registry_address=registry_address,\n repository=\"metalk8s-{}\".format(version),\n image=\"metalk8s-utils:{}\".format(version),\n )\n\n\n@pytest.fixture\ndef ssh_config(request):\n return request.config.getoption(\"--ssh-config\")\n\n\n@pytest.fixture\ndef prometheus_api(control_plane_ingress_ep):\n return utils.PrometheusApi(endpoint=control_plane_ingress_ep)\n\n\ndef count_running_pods(request, k8s_client, 
pods_count, label, namespace, node):\n ssh_config = request.config.getoption(\"--ssh-config\")\n\n def _check_pods_count():\n pods = kube_utils.get_pods(\n k8s_client,\n ssh_config,\n label,\n node,\n namespace=namespace,\n state=\"Running\",\n )\n assert len(pods) == pods_count\n\n error_msg = (\n \"There is not exactly '{count}' pod(s) labeled '{label}' running \"\n \"in namespace '{namespace}'\".format(\n count=pods_count, label=label, namespace=namespace\n )\n )\n if node:\n error_msg += \"on node '{node}'\".format(node=node)\n\n utils.retry(_check_pods_count, times=40, wait=3, error_msg=error_msg)\n\n\n_COUNT_RUNNING_PODS_PARSER = parsers.re(\n r\"we have (?P\\d+) running pod labeled '(?P
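count_running_pods delegates its polling to utils.retry; the section cuts off before that helper is shown, but the call sites imply a bounded assertion-retry loop. A hedged sketch of such a helper (this is an assumption about its shape, not MetalK8s's actual implementation):

```python
import time

def retry(assertion, times=10, wait=1, error_msg=None):
    last_exc = None
    for _ in range(times):
        try:
            return assertion()   # return as soon as the check passes
        except AssertionError as exc:
            last_exc = exc
            time.sleep(wait)     # back off before the next attempt
    raise AssertionError(error_msg or str(last_exc))
```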